Mirror of https://github.com/juanfont/headscale.git (synced 2024-11-29 18:33:05 +00:00)

Merge branch 'juanfont:main' into main

Commit bbe1327785
124 changed files with 6594 additions and 5311 deletions
65  .github/ISSUE_TEMPLATE/bug_report.md (vendored)
@@ -1,65 +0,0 @@
---
name: "Bug report"
about: "Create a bug report to help us improve"
title: ""
labels: ["bug"]
assignees: ""
---

<!--
Before posting a bug report, discuss the behaviour you are expecting with the Discord community
to make sure that it is truly a bug.
The issue tracker is not the place to ask for support or how to set up Headscale.

Bug reports without the sufficient information will be closed.

Headscale is a multinational community across the globe. Our language is English.
All bug reports needs to be in English.
-->

## Bug description

<!-- A clear and concise description of what the bug is. Describe the expected bahavior
and how it is currently different. If you are unsure if it is a bug, consider discussing
it on our Discord server first. -->

## Environment

<!-- Please add relevant information about your system. For example:
- Version of headscale used
- Version of tailscale client
- OS (e.g. Linux, Mac, Cygwin, WSL, etc.) and version
- Kernel version
- The relevant config parameters you used
- Log output
-->

- OS:
- Headscale version:
- Tailscale version:

<!--
We do not support running Headscale in a container nor behind a (reverse) proxy.
If either of these are true for your environment, ask the community in Discord
instead of filing a bug report.
-->

- [ ] Headscale is behind a (reverse) proxy
- [ ] Headscale runs in a container

## To Reproduce

<!-- Steps to reproduce the behavior. -->

## Logs and attachments

<!-- Please attach files with:
- Client netmap dump (see below)
- ACL configuration
- Headscale configuration

Dump the netmap of tailscale clients:
`tailscale debug netmap > DESCRIPTIVE_NAME.json`

Please provide information describing the netmap, which client, which headscale version etc.
-->
83  .github/ISSUE_TEMPLATE/bug_report.yaml (vendored, new file)
@@ -0,0 +1,83 @@
name: 🐞 Bug
description: File a bug/issue
title: "[Bug] <title>"
labels: ["bug", "needs triage"]
body:
  - type: checkboxes
    attributes:
      label: Is this a support request?
      description: This issue tracker is for bugs and feature requests only. If you need help, please use ask in our Discord community
      options:
        - label: This is not a support request
          required: true
  - type: checkboxes
    attributes:
      label: Is there an existing issue for this?
      description: Please search to see if an issue already exists for the bug you encountered.
      options:
        - label: I have searched the existing issues
          required: true
  - type: textarea
    attributes:
      label: Current Behavior
      description: A concise description of what you're experiencing.
    validations:
      required: true
  - type: textarea
    attributes:
      label: Expected Behavior
      description: A concise description of what you expected to happen.
    validations:
      required: true
  - type: textarea
    attributes:
      label: Steps To Reproduce
      description: Steps to reproduce the behavior.
      placeholder: |
        1. In this environment...
        1. With this config...
        1. Run '...'
        1. See error...
    validations:
      required: true
  - type: textarea
    attributes:
      label: Environment
      description: |
        examples:
          - **OS**: Ubuntu 20.04
          - **Headscale version**: 0.22.3
          - **Tailscale version**: 1.64.0
      value: |
        - OS:
        - Headscale version:
        - Tailscale version:
      render: markdown
    validations:
      required: true
  - type: checkboxes
    attributes:
      label: Runtime environment
      options:
        - label: Headscale is behind a (reverse) proxy
          required: false
        - label: Headscale runs in a container
          required: false
  - type: textarea
    attributes:
      label: Anything else?
      description: |
        Links? References? Anything that will give us more context about the issue you are encountering!

        - Client netmap dump (see below)
        - ACL configuration
        - Headscale configuration

        Dump the netmap of tailscale clients:
        `tailscale debug netmap > DESCRIPTIVE_NAME.json`

        Please provide information describing the netmap, which client, which headscale version etc.

        Tip: You can attach images or log files by clicking this area to highlight it and then dragging files in.
    validations:
      required: false
26  .github/ISSUE_TEMPLATE/feature_request.md (vendored)
@@ -1,26 +0,0 @@
---
name: "Feature request"
about: "Suggest an idea for headscale"
title: ""
labels: ["enhancement"]
assignees: ""
---

<!--
We typically have a clear roadmap for what we want to improve and reserve the right
to close feature requests that does not fit in the roadmap, or fit with the scope
of the project, or we actually want to implement ourselves.

Headscale is a multinational community across the globe. Our language is English.
All bug reports needs to be in English.
-->

## Why

<!-- Include the reason, why you would need the feature. E.g. what problem
does it solve? Or which workflow is currently frustrating and will be improved by
this? -->

## Description

<!-- A clear and precise description of what new or changed feature you want. -->
36  .github/ISSUE_TEMPLATE/feature_request.yaml (vendored, new file)
@@ -0,0 +1,36 @@
name: 🚀 Feature Request
description: Suggest an idea for Headscale
title: "[Feature] <title>"
labels: [enhancement]
body:
  - type: textarea
    attributes:
      label: Use case
      description: Please describe the use case for this feature.
      placeholder: |
        <!-- Include the reason, why you would need the feature. E.g. what problem
        does it solve? Or which workflow is currently frustrating and will be improved by
        this? -->
    validations:
      required: true
  - type: textarea
    attributes:
      label: Description
      description: A clear and precise description of what new or changed feature you want.
    validations:
      required: true
  - type: checkboxes
    attributes:
      label: Contribution
      description: Are you willing to contribute to the implementation of this feature?
      options:
        - label: I can write the design doc for this feature
          required: true
        - label: I can contribute this feature
          required: true
  - type: textarea
    attributes:
      label: How can it be implemented?
      description: Free text for your ideas on how this feature could be implemented.
    validations:
      required: false
2  .github/pull_request_template.md (vendored)
@@ -12,7 +12,7 @@ If you find mistakes in the documentation, please submit a fix to the documentat
<!-- Please tick if the following things apply. You… -->

- [ ] read the [CONTRIBUTING guidelines](README.md#contributing)
- [ ] have read the [CONTRIBUTING.md](./CONTRIBUTING.md) file
- [ ] raised a GitHub issue or discussed it on the projects chat beforehand
- [ ] added unit tests
- [ ] added integration tests
36  .github/workflows/contributors.yml (vendored)
@@ -1,36 +0,0 @@
name: Contributors

on:
  push:
    branches:
      - main
  workflow_dispatch:

jobs:
  add-contributors:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Delete upstream contributor branch
        # Allow continue on failure to account for when the
        # upstream branch is deleted or does not exist.
        continue-on-error: true
        run: git push origin --delete update-contributors
      - name: Create up-to-date contributors branch
        run: git checkout -B update-contributors
      - name: Push empty contributors branch
        run: git push origin update-contributors
      - name: Switch back to main
        run: git checkout main
      - uses: BobAnkh/add-contributors@v0.2.2
        with:
          CONTRIBUTOR: "## Contributors"
          COLUMN_PER_ROW: "6"
          ACCESS_TOKEN: ${{secrets.GITHUB_TOKEN}}
          IMG_WIDTH: "100"
          FONT_SIZE: "14"
          PATH: "/README.md"
          COMMIT_MESSAGE: "docs(README): update contributors"
          AVATAR_SHAPE: "round"
          BRANCH: "update-contributors"
          PULL_REQUEST: "main"
27  .github/workflows/docs-test.yml (vendored, new file)
@@ -0,0 +1,27 @@
name: Test documentation build

on: [pull_request]

concurrency:
  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - name: Install python
        uses: actions/setup-python@v4
        with:
          python-version: 3.x
      - name: Setup cache
        uses: actions/cache@v2
        with:
          key: ${{ github.ref }}
          path: .cache
      - name: Setup dependencies
        run: pip install -r docs/requirements.txt
      - name: Build docs
        run: mkdocs build --strict
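The workflow above only runs in CI; the same check can be reproduced locally before opening a pull request. A minimal sketch, assuming Python 3 and the repository root as the working directory, using the exact commands from the workflow:

```bash
# Install the documentation toolchain pinned in docs/requirements.txt
pip install -r docs/requirements.txt

# Build the site with --strict so warnings (e.g. broken links or nav entries)
# fail the build, mirroring what the docs-test workflow does on pull requests.
mkdocs build --strict
```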
4  .github/workflows/test-integration.yaml (vendored)
@@ -26,6 +26,7 @@ jobs:
- TestPreAuthKeyCommand
- TestPreAuthKeyCommandWithoutExpiry
- TestPreAuthKeyCommandReusableEphemeral
- TestPreAuthKeyCorrectUserLoggedInCommand
- TestApiKeyCommand
- TestNodeTagCommand
- TestNodeAdvertiseTagNoACLCommand
@@ -43,7 +44,8 @@ jobs:
- TestTaildrop
- TestResolveMagicDNS
- TestExpireNode
- TestNodeOnlineLastSeenStatus
- TestNodeOnlineStatus
- TestPingAllByIPManyUpDown
- TestEnablingRoutes
- TestHASubnetRouterFailover
- TestEnableDisableAutoApprovedRoute
@@ -135,7 +135,7 @@ kos:
  - id: ghcr-debug
    repository: ghcr.io/juanfont/headscale
    bare: true
    base_image: "debian:12"
    base_image: gcr.io/distroless/base-debian12:debug
    build: headscale
    main: ./cmd/headscale
    env:
@@ -160,7 +160,7 @@ kos:
  - id: dockerhub-debug
    build: headscale
    base_image: "debian:12"
    base_image: gcr.io/distroless/base-debian12:debug
    repository: headscale/headscale
    bare: true
    platforms:
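The debug image variants now build on `gcr.io/distroless/base-debian12:debug`, which ships a busybox shell while the regular images stay shell-less. A minimal sketch of inspecting such an image; the image tag shown is illustrative only and not taken from this commit:

```bash
# Pull a debug variant (the tag here is a hypothetical example).
docker pull ghcr.io/juanfont/headscale:v0.23.0-alpha5-debug

# distroless *:debug bases include busybox under /busybox, so a shell can be
# reached by overriding the entrypoint; plain images provide no shell at all.
docker run --rm -it --entrypoint /busybox/sh ghcr.io/juanfont/headscale:v0.23.0-alpha5-debug
```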
19  CHANGELOG.md
@@ -26,7 +26,7 @@ after improving the test harness as part of adopting [#1460](https://github.com/
- Code reorganisation, a lot of code has moved, please review the following PRs accordingly [#1473](https://github.com/juanfont/headscale/pull/1473)
- Change the structure of database configuration, see [config-example.yaml](./config-example.yaml) for the new structure. [#1700](https://github.com/juanfont/headscale/pull/1700)
  - Old structure has been remove and the configuration _must_ be converted.
  - Adds additional configuration for PostgreSQL for setting max open, idle conection and idle connection lifetime.
  - Adds additional configuration for PostgreSQL for setting max open, idle connection and idle connection lifetime.
- API: Machine is now Node [#1553](https://github.com/juanfont/headscale/pull/1553)
- Remove support for older Tailscale clients [#1611](https://github.com/juanfont/headscale/pull/1611)
  - The latest supported client is 1.38
@@ -39,6 +39,7 @@ after improving the test harness as part of adopting [#1460](https://github.com/
- `/var/lib/headscale` and `/var/run/headscale` is no longer created automatically, see [container docs](./docs/running-headscale-container.md)
- Prefixes are now defined per v4 and v6 range. [#1756](https://github.com/juanfont/headscale/pull/1756)
  - `ip_prefixes` option is now `prefixes.v4` and `prefixes.v6`
  - `prefixes.allocation` can be set to assign IPs at `sequential` or `random`. [#1869](https://github.com/juanfont/headscale/pull/1869)

### Changes

@@ -53,6 +54,10 @@ after improving the test harness as part of adopting [#1460](https://github.com/
- Turn off gRPC logging [#1640](https://github.com/juanfont/headscale/pull/1640) fixes [#1259](https://github.com/juanfont/headscale/issues/1259)
- Added the possibility to manually create a DERP-map entry which can be customized, instead of automatically creating it. [#1565](https://github.com/juanfont/headscale/pull/1565)
- Add support for deleting api keys [#1702](https://github.com/juanfont/headscale/pull/1702)
- Add command to backfill IP addresses for nodes missing IPs from configured prefixes. [#1869](https://github.com/juanfont/headscale/pull/1869)
- Log available update as warning [#1877](https://github.com/juanfont/headscale/pull/1877)
- Add `autogroup:internet` to Policy [#1917](https://github.com/juanfont/headscale/pull/1917)
- Restore foreign keys and add constraints [#1562](https://github.com/juanfont/headscale/pull/1562)

## 0.22.3 (2023-05-12)

@@ -65,7 +70,7 @@ after improving the test harness as part of adopting [#1460](https://github.com/
### Changes

- Add environment flags to enable pprof (profiling) [#1382](https://github.com/juanfont/headscale/pull/1382)
  - Profiles are continously generated in our integration tests.
  - Profiles are continuously generated in our integration tests.
- Fix systemd service file location in `.deb` packages [#1391](https://github.com/juanfont/headscale/pull/1391)
- Improvements on Noise implementation [#1379](https://github.com/juanfont/headscale/pull/1379)
- Replace node filter logic, ensuring nodes with access can see eachother [#1381](https://github.com/juanfont/headscale/pull/1381)
@@ -156,7 +161,7 @@ after improving the test harness as part of adopting [#1460](https://github.com/
- SSH ACLs status:
  - Support `accept` and `check` (SSH can be enabled and used for connecting and authentication)
  - Rejecting connections **are not supported**, meaning that if you enable SSH, then assume that _all_ `ssh` connections **will be allowed**.
    - If you decied to try this feature, please carefully managed permissions by blocking port `22` with regular ACLs or do _not_ set `--ssh` on your clients.
    - If you decided to try this feature, please carefully managed permissions by blocking port `22` with regular ACLs or do _not_ set `--ssh` on your clients.
    - We are currently improving our testing of the SSH ACLs, help us get an overview by testing and giving feedback.
  - This feature should be considered dangerous and it is disabled by default. Enable by setting `HEADSCALE_EXPERIMENTAL_FEATURE_SSH=1`.
@@ -206,7 +211,7 @@ after improving the test harness as part of adopting [#1460](https://github.com/
### Changes

- Updated dependencies (including the library that lacked armhf support) [#722](https://github.com/juanfont/headscale/pull/722)
- Fix missing group expansion in function `excludeCorretlyTaggedNodes` [#563](https://github.com/juanfont/headscale/issues/563)
- Fix missing group expansion in function `excludeCorrectlyTaggedNodes` [#563](https://github.com/juanfont/headscale/issues/563)
- Improve registration protocol implementation and switch to NodeKey as main identifier [#725](https://github.com/juanfont/headscale/pull/725)
- Add ability to connect to PostgreSQL via unix socket [#734](https://github.com/juanfont/headscale/pull/734)
@@ -226,7 +231,7 @@ after improving the test harness as part of adopting [#1460](https://github.com/
- Fix send on closed channel crash in polling [#542](https://github.com/juanfont/headscale/pull/542)
- Fixed spurious calls to setLastStateChangeToNow from ephemeral nodes [#566](https://github.com/juanfont/headscale/pull/566)
- Add command for moving nodes between namespaces [#362](https://github.com/juanfont/headscale/issues/362)
- Added more configuration parameters for OpenID Connect (scopes, free-form paramters, domain and user allowlist)
- Added more configuration parameters for OpenID Connect (scopes, free-form parameters, domain and user allowlist)
- Add command to set tags on a node [#525](https://github.com/juanfont/headscale/issues/525)
- Add command to view tags of nodes [#356](https://github.com/juanfont/headscale/issues/356)
- Add --all (-a) flag to enable routes command [#360](https://github.com/juanfont/headscale/issues/360)
@@ -274,10 +279,10 @@ after improving the test harness as part of adopting [#1460](https://github.com/
- Fix a bug were the same IP could be assigned to multiple hosts if joined in quick succession [#346](https://github.com/juanfont/headscale/pull/346)
- Simplify the code behind registration of machines [#366](https://github.com/juanfont/headscale/pull/366)
  - Nodes are now only written to database if they are registrated successfully
  - Nodes are now only written to database if they are registered successfully
- Fix a limitation in the ACLs that prevented users to write rules with `*` as source [#374](https://github.com/juanfont/headscale/issues/374)
- Reduce the overhead of marshal/unmarshal for Hostinfo, routes and endpoints by using specific types in Machine [#371](https://github.com/juanfont/headscale/pull/371)
- Apply normalization function to FQDN on hostnames when hosts registers and retrieve informations [#363](https://github.com/juanfont/headscale/issues/363)
- Apply normalization function to FQDN on hostnames when hosts registers and retrieve information [#363](https://github.com/juanfont/headscale/issues/363)
- Fix a bug that prevented the use of `tailscale logout` with OIDC [#508](https://github.com/juanfont/headscale/issues/508)
- Added Tailscale repo HEAD and unstable releases channel to the integration tests targets [#513](https://github.com/juanfont/headscale/pull/513)
34  CONTRIBUTING.md (new file)
@@ -0,0 +1,34 @@
# Contributing

Headscale is "Open Source, acknowledged contribution", this means that any contribution will have to be discussed with the maintainers before being added to the project.
This model has been chosen to reduce the risk of burnout by limiting the maintenance overhead of reviewing and validating third-party code.

## Why do we have this model?

Headscale has a small maintainer team that tries to balance working on the project, fixing bugs and reviewing contributions.

When we work on issues ourselves, we develop first hand knowledge of the code and it makes it possible for us to maintain and own the code as the project develops.

Code contributions are seen as a positive thing. People enjoy and engage with our project, but it also comes with some challenges; we have to understand the code, we have to understand the feature, we might have to become familiar with external libraries or services and we think about security implications. All those steps are required during the reviewing process. After the code has been merged, the feature has to be maintained. Any changes reliant on external services must be updated and expanded accordingly.

The review and day-1 maintenance adds a significant burden on the maintainers. Often we hope that the contributor will help out, but we found that most of the time, they disappear after their new feature was added.

This means that when someone contributes, we are mostly happy about it, but we do have to run it through a series of checks to establish if we actually can maintain this feature.

## What do we require?

A general description is provided here and an explicit list is provided in our pull request template.

All new features have to start out with a design document, which should be discussed on the issue tracker (not discord). It should include a use case for the feature, how it can be implemented, who will implement it and a plan for maintaining it.

All features have to be end-to-end tested (integration tests) and have good unit test coverage to ensure that they work as expected. This will also ensure that the feature continues to work as expected over time. If a change cannot be tested, a strong case for why this is not possible needs to be presented.

The contributor should help to maintain the feature over time. In case the feature is not maintained probably, the maintainers reserve themselves the right to remove features they redeem as unmaintainable. This should help to improve the quality of the software and keep it in a maintainable state.

## Bug fixes

Headscale is open to code contributions for bug fixes without discussion.

## Documentation

If you find mistakes in the documentation, please submit a fix to the documentation.
@@ -2,31 +2,24 @@
# and are in no way endorsed by Headscale's maintainers as an
# official nor supported release or distribution.

FROM docker.io/golang:1.22-bookworm AS build
FROM docker.io/golang:1.22-bookworm
ARG VERSION=dev
ENV GOPATH /go
WORKDIR /go/src/headscale

COPY go.mod go.sum /go/src/headscale/
RUN go mod download

COPY . .

RUN CGO_ENABLED=0 GOOS=linux go install -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$VERSION" -a ./cmd/headscale
RUN test -e /go/bin/headscale

# Debug image
FROM docker.io/golang:1.22-bookworm

COPY --from=build /go/bin/headscale /bin/headscale
ENV TZ UTC

RUN apt-get update \
    && apt-get install --no-install-recommends --yes less jq \
    && rm -rf /var/lib/apt/lists/* \
    && apt-get clean
RUN mkdir -p /var/run/headscale

COPY go.mod go.sum /go/src/headscale/
RUN go mod download

COPY . .

RUN CGO_ENABLED=0 GOOS=linux go install -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$VERSION" -a ./cmd/headscale && test -e /go/bin/headscale

# Need to reset the entrypoint or everything will run as a busybox script
ENTRYPOINT []
EXPOSE 8080/tcp
@@ -1,21 +1,43 @@
# This Dockerfile and the images produced are for testing headscale,
# and are in no way endorsed by Headscale's maintainers as an
# official nor supported release or distribution.
# Copyright (c) Tailscale Inc & AUTHORS
# SPDX-License-Identifier: BSD-3-Clause

FROM golang:latest
# This Dockerfile is more or less lifted from tailscale/tailscale
# to ensure a similar build process when testing the HEAD of tailscale.

RUN apt-get update \
    && apt-get install -y dnsutils git iptables ssh ca-certificates \
    && rm -rf /var/lib/apt/lists/*
FROM golang:1.22-alpine AS build-env

RUN useradd --shell=/bin/bash --create-home ssh-it-user
WORKDIR /go/src

RUN apk add --no-cache git

# Replace `RUN git...` with `COPY` and a local checked out version of Tailscale in `./tailscale`
# to test specific commits of the Tailscale client. This is useful when trying to find out why
# something specific broke between two versions of Tailscale with for example `git bisect`.
# COPY ./tailscale .
RUN git clone https://github.com/tailscale/tailscale.git

WORKDIR /go/tailscale
WORKDIR /go/src/tailscale

RUN git checkout main \
    && sh build_dist.sh tailscale.com/cmd/tailscale \
    && sh build_dist.sh tailscale.com/cmd/tailscaled \
    && cp tailscale /usr/local/bin/ \
    && cp tailscaled /usr/local/bin/

# see build_docker.sh
ARG VERSION_LONG=""
ENV VERSION_LONG=$VERSION_LONG
ARG VERSION_SHORT=""
ENV VERSION_SHORT=$VERSION_SHORT
ARG VERSION_GIT_HASH=""
ENV VERSION_GIT_HASH=$VERSION_GIT_HASH
ARG TARGETARCH

RUN GOARCH=$TARGETARCH go install -ldflags="\
    -X tailscale.com/version.longStamp=$VERSION_LONG \
    -X tailscale.com/version.shortStamp=$VERSION_SHORT \
    -X tailscale.com/version.gitCommitStamp=$VERSION_GIT_HASH" \
    -v ./cmd/tailscale ./cmd/tailscaled ./cmd/containerboot

FROM alpine:3.18
RUN apk add --no-cache ca-certificates iptables iproute2 ip6tables curl

COPY --from=build-env /go/bin/* /usr/local/bin/
# For compat with the previous run.sh, although ideally you should be
# using build_docker.sh which sets an entrypoint for the image.
RUN mkdir /tailscale && ln -s /usr/local/bin/containerboot /tailscale/run.sh
1  Makefile
@@ -31,6 +31,7 @@ test_integration:
		--name headscale-test-suite \
		-v $$PWD:$$PWD -w $$PWD/integration \
		-v /var/run/docker.sock:/var/run/docker.sock \
		-v $$PWD/control_logs:/tmp/control \
		golang:1 \
		go run gotest.tools/gotestsum@latest -- -failfast ./... -timeout 120m -parallel 8
@@ -97,6 +97,8 @@ func init() {
	tagCmd.Flags().
		StringSliceP("tags", "t", []string{}, "List of tags to add to the node")
	nodeCmd.AddCommand(tagCmd)

	nodeCmd.AddCommand(backfillNodeIPsCmd)
}

var nodeCmd = &cobra.Command{
@@ -477,6 +479,57 @@ var moveNodeCmd = &cobra.Command{
	},
}

var backfillNodeIPsCmd = &cobra.Command{
	Use:   "backfillips",
	Short: "Backfill IPs missing from nodes",
	Long: `
Backfill IPs can be used to add/remove IPs from nodes
based on the current configuration of Headscale.

If there are nodes that does not have IPv4 or IPv6
even if prefixes for both are configured in the config,
this command can be used to assign IPs of the sort to
all nodes that are missing.

If you remove IPv4 or IPv6 prefixes from the config,
it can be run to remove the IPs that should no longer
be assigned to nodes.`,
	Run: func(cmd *cobra.Command, args []string) {
		var err error
		output, _ := cmd.Flags().GetString("output")

		confirm := false
		prompt := &survey.Confirm{
			Message: "Are you sure that you want to assign/remove IPs to/from nodes?",
		}
		err = survey.AskOne(prompt, &confirm)
		if err != nil {
			return
		}
		if confirm {
			ctx, client, conn, cancel := getHeadscaleCLIClient()
			defer cancel()
			defer conn.Close()

			changes, err := client.BackfillNodeIPs(ctx, &v1.BackfillNodeIPsRequest{Confirmed: confirm})
			if err != nil {
				ErrorOutput(
					err,
					fmt.Sprintf(
						"Error backfilling IPs: %s",
						status.Convert(err).Message(),
					),
					output,
				)

				return
			}

			SuccessOutput(changes, "Node IPs backfilled successfully", output)
		}
	},
}

func nodesToPtables(
	currentUser string,
	showTags bool,
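The new subcommand is registered under the `nodes` command as `backfillips`, and it prompts for confirmation before touching the database. A usage sketch; the `-o/--output` flag is the standard headscale output selector and everything else here is taken from the snippet above:

```bash
# Add (or remove) v4/v6 addresses so nodes match the currently configured
# `prefixes`; the command asks for confirmation before changing anything.
headscale nodes backfillips

# Same operation with machine-readable output, e.g. for scripting.
headscale nodes backfillips --output json
```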
@@ -51,13 +51,11 @@ func initConfig() {
	cfg, err := types.GetHeadscaleConfig()
	if err != nil {
		log.Fatal().Caller().Err(err).Msg("Failed to get headscale configuration")
		log.Fatal().Err(err).Msg("Failed to read headscale configuration")
	}

	machineOutput := HasMachineOutputFlag()

	zerolog.SetGlobalLevel(cfg.Log.Level)

	// If the user has requested a "node" readable format,
	// then disable login so the output remains valid.
	if machineOutput {
@@ -78,7 +76,7 @@ func initConfig() {
	res, err := latest.Check(githubTag, Version)
	if err == nil && res.Outdated {
		//nolint
		fmt.Printf(
		log.Warn().Msgf(
			"An updated version of Headscale has been found (%s vs. your current %s). Check it out https://github.com/juanfont/headscale/releases\n",
			res.Current,
			Version,
@@ -4,7 +4,7 @@ import (
	"os"
	"time"

	"github.com/efekarakus/termcolor"
	"github.com/jagottsicher/termcolor"
	"github.com/juanfont/headscale/cmd/headscale/cli"
	"github.com/rs/zerolog"
	"github.com/rs/zerolog/log"
@@ -66,6 +66,11 @@ prefixes:
  v6: fd7a:115c:a1e0::/48
  v4: 100.64.0.0/10

  # Strategy used for allocation of IPs to nodes, available options:
  # - sequential (default): assigns the next free IP from the previous given IP.
  # - random: assigns the next free IP from a pseudo-random IP generator (crypto/rand).
  allocation: sequential

# DERP is a relay system that Tailscale uses when a direct
# connection cannot be established.
# https://tailscale.com/blog/how-tailscale-works/#encrypted-tcp-relays-derp
@@ -105,7 +110,7 @@ derp:
  automatically_add_embedded_derp_region: true

  # For better connection stability (especially when using an Exit-Node and DNS is not working),
  # it is possible to optionall add the public IPv4 and IPv6 address to the Derp-Map using:
  # it is possible to optionally add the public IPv4 and IPv6 address to the Derp-Map using:
  ipv4: 1.2.3.4
  ipv6: 2001:db8::1
@@ -137,12 +142,6 @@ disable_check_updates: false
# Time before an inactive ephemeral node is deleted?
ephemeral_node_inactivity_timeout: 30m

# Period to check for node updates within the tailnet. A value too low will severely affect
# CPU consumption of Headscale. A value too high (over 60s) will cause problems
# for the nodes, as they won't get updates or keep alive messages frequently enough.
# In case of doubts, do not touch the default 10s.
node_update_check_interval: 10s

database:
  type: sqlite
@@ -205,7 +204,7 @@ log:
  format: text
  level: info

# Path to a file containg ACL policies.
# Path to a file containing ACL policies.
# ACLs can be defined as YAML or HUJSON.
# https://tailscale.com/kb/1018/acls/
acl_policy_path: ""
@@ -14,7 +14,7 @@ If the node is already registered, it can advertise exit capabilities like this:
$ sudo tailscale set --advertise-exit-node
```

To use a node as an exit node, IP forwarding must be enabled on the node. Check the official [Tailscale documentation](https://tailscale.com/kb/1019/subnets/?tab=linux#enable-ip-forwarding) for how to enable IP fowarding.
To use a node as an exit node, IP forwarding must be enabled on the node. Check the official [Tailscale documentation](https://tailscale.com/kb/1019/subnets/?tab=linux#enable-ip-forwarding) for how to enable IP forwarding.

## On the control server
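For reference, enabling IP forwarding on a typical Linux client boils down to two sysctls. This mirrors the linked Tailscale guide and is a general Linux step rather than anything headscale-specific:

```bash
# Persist IPv4 and IPv6 forwarding, then apply the settings immediately.
echo 'net.ipv4.ip_forward = 1' | sudo tee -a /etc/sysctl.d/99-tailscale.conf
echo 'net.ipv6.conf.all.forwarding = 1' | sudo tee -a /etc/sysctl.d/99-tailscale.conf
sudo sysctl -p /etc/sysctl.d/99-tailscale.conf
```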
@@ -36,7 +36,7 @@ We don't know. We might be working on it. If you want to help, please send us a
Please be aware that there are a number of reasons why we might not accept specific contributions:

- It is not possible to implement the feature in a way that makes sense in a self-hosted environment.
- Given that we are reverse-engineering Tailscale to satify our own curiosity, we might be interested in implementing the feature ourselves.
- Given that we are reverse-engineering Tailscale to satisfy our own curiosity, we might be interested in implementing the feature ourselves.
- You are not sending unit and integration tests with it.

## Do you support Y method of deploying Headscale?
BIN  docs/images/headscale-sealos-grpc-url.png (new file)
Binary file not shown. (After: 35 KiB)

BIN  docs/images/headscale-sealos-url.png (new file)
Binary file not shown. (After: 35 KiB)
@@ -58,12 +58,12 @@ A solution could be to consider a headscale server (in it's entirety) as a
tailnet.

For personal users the default behavior could either allow all communications
between all namespaces (like tailscale) or dissallow all communications between
between all namespaces (like tailscale) or disallow all communications between
namespaces (current behavior).

For businesses and organisations, viewing a headscale instance a single tailnet
would allow users (namespace) to talk to each other with the ACLs. As described
in tailscale's documentation [[1]], a server should be tagged and personnal
in tailscale's documentation [[1]], a server should be tagged and personal
devices should be tied to a user. Translated in headscale's terms each user can
have multiple devices and all those devices should be in the same namespace.
The servers should be tagged and used as such.
@@ -88,7 +88,7 @@ the ability to rules in either format (HuJSON or YAML).
Let's build an example use case for a small business (It may be the place where
ACL's are the most useful).

We have a small company with a boss, an admin, two developper and an intern.
We have a small company with a boss, an admin, two developer and an intern.

The boss should have access to all servers but not to the users hosts. Admin
should also have access to all hosts except that their permissions should be
@@ -173,7 +173,7 @@ need to add the following ACLs
      "ports": ["prod:*", "dev:*", "internal:*"]
    },

    // admin have access to adminstration port (lets only consider port 22 here)
    // admin have access to administration port (lets only consider port 22 here)
    {
      "action": "accept",
      "users": ["group:admin"],
@@ -1,13 +1,13 @@
# Controlling `headscale` with remote CLI

## Prerequisit
## Prerequisite

- A workstation to run `headscale` (could be Linux, macOS, other supported platforms)
- A `headscale` server (version `0.13.0` or newer)
- Access to create API keys (local access to the `headscale` server)
- `headscale` _must_ be served over TLS/HTTPS
  - Remote access does _not_ support unencrypted traffic.
- Port `50443` must be open in the firewall (or port overriden by `grpc_listen_addr` option)
- Port `50443` must be open in the firewall (or port overridden by `grpc_listen_addr` option)

## Goal
@@ -97,4 +97,4 @@ Checklist:
- Make sure you use version `0.13.0` or newer.
- Verify that your TLS certificate is valid and trusted
- If you do not have access to a trusted certificate (e.g. from Let's Encrypt), add your self signed certificate to the trust store of your OS or
  - Set `HEADSCALE_CLI_INSECURE` to 0 in your environement
  - Set `HEADSCALE_CLI_INSECURE` to 0 in your environment
@ -1,5 +1,4 @@
|
|||
cairosvg~=2.7.1
mkdocs-material~=9.4.14
mkdocs-material~=9.5.18
mkdocs-minify-plugin~=0.7.1
pillow~=10.1.0
@@ -115,7 +115,7 @@ The following Caddyfile is all that is necessary to use Caddy as a reverse proxy
}
```

Caddy v2 will [automatically](https://caddyserver.com/docs/automatic-https) provision a certficate for your domain/subdomain, force HTTPS, and proxy websockets - no further configuration is necessary.
Caddy v2 will [automatically](https://caddyserver.com/docs/automatic-https) provision a certificate for your domain/subdomain, force HTTPS, and proxy websockets - no further configuration is necessary.

For a slightly more complex configuration which utilizes Docker containers to manage Caddy, Headscale, and Headscale-UI, [Guru Computing's guide](https://blog.gurucomputing.com.au/smart-vpns-with-headscale/) is an excellent reference.
@@ -20,17 +20,19 @@ configuration (`/etc/headscale/config.yaml`).
## Installation

1. Download the latest Headscale package for your platform (`.deb` for Ubuntu and Debian) from [Headscale's releases page](https://github.com/juanfont/headscale/releases):
1. Download the [latest Headscale package](https://github.com/juanfont/headscale/releases/latest) for your platform (`.deb` for Ubuntu and Debian).

   ```shell
   HEADSCALE_VERSION="" # See above URL for latest version, e.g. "X.Y.Z" (NOTE: do not add the "v" prefix!)
   HEADSCALE_ARCH="" # Your system architecture, e.g. "amd64"
   wget --output-document=headscale.deb \
     https://github.com/juanfont/headscale/releases/download/v<HEADSCALE VERSION>/headscale_<HEADSCALE VERSION>_linux_<ARCH>.deb
     "https://github.com/juanfont/headscale/releases/download/v${HEADSCALE_VERSION}/headscale_${HEADSCALE_VERSION}_linux_${HEADSCALE_ARCH}.deb"
   ```

1. Install Headscale:

   ```shell
   sudo apt install headscale.deb
   sudo apt install ./headscale.deb
   ```

1. Enable Headscale service, this will start Headscale at boot:
@@ -9,19 +9,17 @@
## Goal

This documentation has the goal of showing a user how-to install and run `headscale` on OpenBSD 7.1.
This documentation has the goal of showing a user how-to install and run `headscale` on OpenBSD.
In additional to the "get up and running section", there is an optional [rc.d section](#running-headscale-in-the-background-with-rcd)
describing how to make `headscale` run properly in a server environment.

## Install `headscale`

1. Install from ports (not recommended)
1. Install from ports

   !!! info
       You can install headscale from ports by running `pkg_add headscale`.

   As of OpenBSD 7.2, there's a headscale in ports collection, however, it's severely outdated(v0.12.4). You can install it via `pkg_add headscale`.

1. Install from source on OpenBSD 7.2
1. Install from source

   ```shell
   # Install prerequistes
@@ -32,7 +30,7 @@ describing how to make `headscale` run properly in a server environment.
   cd headscale

   # optionally checkout a release
   # option a. you can find offical relase at https://github.com/juanfont/headscale/releases/latest
   # option a. you can find official release at https://github.com/juanfont/headscale/releases/latest
   # option b. get latest tag, this may be a beta release
   latestTag=$(git describe --tags `git rev-list --tags --max-count=1`)
@@ -59,7 +57,7 @@ describing how to make `headscale` run properly in a server environment.
   cd headscale

   # optionally checkout a release
   # option a. you can find offical relase at https://github.com/juanfont/headscale/releases/latest
   # option a. you can find official release at https://github.com/juanfont/headscale/releases/latest
   # option b. get latest tag, this may be a beta release
   latestTag=$(git describe --tags `git rev-list --tags --max-count=1`)
136  docs/running-headscale-sealos.md (new file)
@@ -0,0 +1,136 @@
# Running headscale on Sealos

!!! warning "Community documentation"

    This page is not actively maintained by the headscale authors and is
    written by community members. It is _not_ verified by `headscale` developers.

    **It might be outdated and it might miss necessary steps**.

## Goal

This documentation has the goal of showing a user how-to run `headscale` on Sealos.

## Running headscale server

1. Click the following prebuilt template(version [0.23.0-alpha2](https://github.com/juanfont/headscale/releases/tag/v0.23.0-alpha2)):

   [![](https://cdn.jsdelivr.net/gh/labring-actions/templates@main/Deploy-on-Sealos.svg)](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dheadscale)

2. Click "Deploy Application" on the template page to start deployment. Upon completion, two applications appear: Headscale, and its [visual interface](https://github.com/GoodiesHQ/headscale-admin).
3. Once deployment concludes, click 'Details' on the Headscale application page to navigate to the application's details.
4. Wait for the application's status to switch to running. For accessing the headscale server, the Public Address associated with port 8080 is the address of the headscale server. To access the Headscale console, simply append `/admin/` to the Headscale public URL.

   ![](./images/headscale-sealos-url.png)

5. Click on 'Terminal' button on the right side of the details to access the Terminal of the headscale application. then create a user ([tailnet](https://tailscale.com/kb/1136/tailnet/)):

   ```bash
   headscale users create myfirstuser
   ```

### Register a machine (normal login)

On a client machine, execute the `tailscale` login command:

```bash
# replace <YOUR_HEADSCALE_URL> with the public domain provided by Sealos
tailscale up --login-server YOUR_HEADSCALE_URL
```

To register a machine when running headscale in [Sealos](https://sealos.io), click on 'Terminal' button on the right side of the headscale application's detail page to access the Terminal of the headscale application, then take the headscale command:

```bash
headscale --user myfirstuser nodes register --key <YOU_+MACHINE_KEY>
```

### Register machine using a pre authenticated key

click on 'Terminal' button on the right side of the headscale application's detail page to access the Terminal of the headscale application, then generate a key using the command line:

```bash
headscale --user myfirstuser preauthkeys create --reusable --expiration 24h
```

This will return a pre-authenticated key that can be used to connect a node to `headscale` during the `tailscale` command:

```bash
tailscale up --login-server <YOUR_HEADSCALE_URL> --authkey <YOUR_AUTH_KEY>
```

## Controlling headscale with remote CLI

This documentation has the goal of showing a user how-to set control a headscale instance from a remote machine with the headscale command line binary.

### Create an API key

We need to create an API key to authenticate our remote headscale when using it from our workstation.

To create a API key, click on 'Terminal' button on the right side of the headscale application's detail page to access the Terminal of the headscale application, then generate a key:

```bash
headscale apikeys create --expiration 90d
```

Copy the output of the command and save it for later. Please note that you can not retrieve a key again, if the key is lost, expire the old one, and create a new key.

To list the keys currently assosicated with the server:

```bash
headscale apikeys list
```

and to expire a key:

```bash
headscale apikeys expire --prefix "<PREFIX>"
```

### Download and configure `headscale` client

1. Download the latest [`headscale` binary from GitHub's release page](https://github.com/juanfont/headscale/releases):

2. Put the binary somewhere in your `PATH`, e.g. `/usr/local/bin/headscale`

3. Make `headscale` executable:

   ```shell
   chmod +x /usr/local/bin/headscale
   ```

4. Configure the CLI through Environment Variables

   ```shell
   export HEADSCALE_CLI_ADDRESS="<HEADSCALE ADDRESS>:443"
   export HEADSCALE_CLI_API_KEY="<API KEY FROM PREVIOUS STAGE>"
   ```

   In the headscale application's detail page, The Public Address corresponding to port 50443 corresponds to the value of <HEADSCALE ADDRESS>.

   ![](./images/headscale-sealos-grpc-url.png)

   for example:

   ```shell
   export HEADSCALE_CLI_ADDRESS="pwnjnnly.cloud.sealos.io:443"
   export HEADSCALE_CLI_API_KEY="abcde12345"
   ```

   This will tell the `headscale` binary to connect to a remote instance, instead of looking
   for a local instance.

   The API key is needed to make sure that your are allowed to access the server. The key is _not_
   needed when running directly on the server, as the connection is local.

1. Test the connection

   Let us run the headscale command to verify that we can connect by listing our nodes:

   ```shell
   headscale nodes list
   ```

   You should now be able to see a list of your nodes from your workstation, and you can
   now control the `headscale` server from your workstation.

> Reference: [Headscale Deployment and Usage Guide: Mastering Tailscale's Self-Hosting Basics](https://icloudnative.io/en/posts/how-to-set-up-or-migrate-headscale/)
@@ -5,10 +5,11 @@
This page contains community contributions. The projects listed here are not
maintained by the Headscale authors and are written by community members.

| Name            | Repository Link                                          | Description                                                               | Status |
| --------------- | -------------------------------------------------------- | ------------------------------------------------------------------------- | ------ |
| headscale-webui | [Github](https://github.com/ifargle/headscale-webui)     | A simple Headscale web UI for small-scale deployments.                    | Alpha  |
| headscale-ui    | [Github](https://github.com/gurucomputing/headscale-ui)  | A web frontend for the headscale Tailscale-compatible coordination server | Alpha  |
| HeadscaleUi     | [GitHub](https://github.com/simcu/headscale-ui)          | A static headscale admin ui, no backend enviroment required               | Alpha  |
| Name            | Repository Link                                          | Description                                                                 | Status |
| --------------- | -------------------------------------------------------- | --------------------------------------------------------------------------- | ------ |
| headscale-webui | [Github](https://github.com/ifargle/headscale-webui)     | A simple Headscale web UI for small-scale deployments.                      | Alpha  |
| headscale-ui    | [Github](https://github.com/gurucomputing/headscale-ui)  | A web frontend for the headscale Tailscale-compatible coordination server   | Alpha  |
| HeadscaleUi     | [GitHub](https://github.com/simcu/headscale-ui)          | A static headscale admin ui, no backend enviroment required                 | Alpha  |
| headscale-admin | [Github](https://github.com/GoodiesHQ/headscale-admin)   | Headscale-Admin is meant to be a simple, modern web interface for Headscale | Beta   |

You can ask for support on our dedicated [Discord channel](https://discord.com/channels/896711691637780480/1105842846386356294).
@@ -1,5 +0,0 @@
# Examples

This directory contains examples on how to run `headscale` on different platforms.

All examples are provided by the community and they are not verified by the `headscale` authors.
2  examples/kustomize/.gitignore (vendored)
@@ -1,2 +0,0 @@
/**/site
/**/secrets
@@ -1,100 +0,0 @@
# Deploying headscale on Kubernetes

**Note:** This is contributed by the community and not verified by the headscale authors.

This directory contains [Kustomize](https://kustomize.io) templates that deploy
headscale in various configurations.

These templates currently support Rancher k3s. Other clusters may require
adaptation, especially around volume claims and ingress.

Commands below assume this directory is your current working directory.

# Generate secrets and site configuration

Run `./init.bash` to generate keys, passwords, and site configuration files.

Edit `base/site/public.env`, changing `public-hostname` to the public DNS name
that will be used for your headscale deployment.

Set `public-proto` to "https" if you're planning to use TLS & Let's Encrypt.

Configure DERP servers by editing `base/site/derp.yaml` if needed.

# Add the image to the registry

You'll somehow need to get `headscale:latest` into your cluster image registry.

An easy way to do this with k3s:

- Reconfigure k3s to use docker instead of containerd (`k3s server --docker`)
- `docker build -t headscale:latest ..` from here

# Create the namespace

If it doesn't already exist, `kubectl create ns headscale`.

# Deploy headscale

## sqlite

`kubectl -n headscale apply -k ./sqlite`

## postgres

`kubectl -n headscale apply -k ./postgres`

# TLS & Let's Encrypt

Test a staging certificate with your configured DNS name and Let's Encrypt.

`kubectl -n headscale apply -k ./staging-tls`

Replace with a production certificate.

`kubectl -n headscale apply -k ./production-tls`

## Static / custom TLS certificates

Only Let's Encrypt is supported. If you need other TLS settings, modify or patch the ingress.

# Administration

Use the wrapper script to remotely operate headscale to perform administrative
tasks like creating namespaces, authkeys, etc.

```
[c@nix-slate:~/Projects/headscale/k8s]$ ./headscale.bash

headscale is an open source implementation of the Tailscale control server

https://github.com/juanfont/headscale

Usage:
  headscale [command]

Available Commands:
  help        Help about any command
  namespace   Manage the namespaces of headscale
  node        Manage the nodes of headscale
  preauthkey  Handle the preauthkeys in headscale
  routes      Manage the routes of headscale
  serve       Launches the headscale server
  version     Print the version.

Flags:
  -h, --help            help for headscale
  -o, --output string   Output format. Empty for human-readable, 'json' or 'json-line'

Use "headscale [command] --help" for more information about a command.

```

# TODO / Ideas

- Interpolate `email:` option to the ClusterIssuer from site configuration.
  This probably needs to be done with a transformer, kustomize vars don't seem to work.
- Add kustomize examples for cloud-native ingress, load balancer
- CockroachDB for the backend
- DERP server deployment
- Tor hidden service
@@ -1,9 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: headscale-config
data:
  server_url: $(PUBLIC_PROTO)://$(PUBLIC_HOSTNAME)
  listen_addr: "0.0.0.0:8080"
  metrics_listen_addr: "127.0.0.1:9090"
  ephemeral_node_inactivity_timeout: "30m"
@@ -1,18 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: headscale
  annotations:
    kubernetes.io/ingress.class: traefik
spec:
  rules:
    - host: $(PUBLIC_HOSTNAME)
      http:
        paths:
          - backend:
              service:
                name: headscale
                port:
                  number: 8080
            path: /
            pathType: Prefix
@@ -1,42 +0,0 @@
namespace: headscale
resources:
  - configmap.yaml
  - ingress.yaml
  - service.yaml
generatorOptions:
  disableNameSuffixHash: true
configMapGenerator:
  - name: headscale-site
    files:
      - derp.yaml=site/derp.yaml
    envs:
      - site/public.env
  - name: headscale-etc
    literals:
      - config.json={}
secretGenerator:
  - name: headscale
    files:
      - secrets/private-key
vars:
  - name: PUBLIC_PROTO
    objRef:
      kind: ConfigMap
      name: headscale-site
      apiVersion: v1
    fieldRef:
      fieldPath: data.public-proto
  - name: PUBLIC_HOSTNAME
    objRef:
      kind: ConfigMap
      name: headscale-site
      apiVersion: v1
    fieldRef:
      fieldPath: data.public-hostname
  - name: CONTACT_EMAIL
    objRef:
      kind: ConfigMap
      name: headscale-site
      apiVersion: v1
    fieldRef:
      fieldPath: data.contact-email
@@ -1,13 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: headscale
  labels:
    app: headscale
spec:
  selector:
    app: headscale
  ports:
    - name: http
      targetPort: http
      port: 8080
@@ -1,3 +0,0 @@
#!/usr/bin/env bash
set -eu
exec kubectl -n headscale exec -ti pod/headscale-0 -- /go/bin/headscale "$@"
@@ -1,22 +0,0 @@
#!/usr/bin/env bash
set -eux
cd $(dirname $0)

umask 022
mkdir -p base/site/
[ ! -e base/site/public.env ] && (
  cat >base/site/public.env <<EOF
public-hostname=localhost
public-proto=http
contact-email=headscale@example.com
EOF
)
[ ! -e base/site/derp.yaml ] && cp ../derp.yaml base/site/derp.yaml

umask 077
mkdir -p base/secrets/
[ ! -e base/secrets/private-key ] && (
  wg genkey > base/secrets/private-key
)
mkdir -p postgres/secrets/
[ ! -e postgres/secrets/password ] && (head -c 32 /dev/urandom | base64 -w0 > postgres/secrets/password)
@@ -1,3 +0,0 @@
#!/usr/bin/env bash
set -eux
kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.4.0/cert-manager.yaml
@@ -1,81 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: headscale
spec:
  replicas: 2
  selector:
    matchLabels:
      app: headscale
  template:
    metadata:
      labels:
        app: headscale
    spec:
      containers:
        - name: headscale
          image: "headscale:latest"
          imagePullPolicy: IfNotPresent
          command: ["/go/bin/headscale", "serve"]
          env:
            - name: SERVER_URL
              value: $(PUBLIC_PROTO)://$(PUBLIC_HOSTNAME)
            - name: LISTEN_ADDR
              valueFrom:
                configMapKeyRef:
                  name: headscale-config
                  key: listen_addr
            - name: METRICS_LISTEN_ADDR
              valueFrom:
                configMapKeyRef:
                  name: headscale-config
                  key: metrics_listen_addr
            - name: DERP_MAP_PATH
              value: /vol/config/derp.yaml
            - name: EPHEMERAL_NODE_INACTIVITY_TIMEOUT
              valueFrom:
                configMapKeyRef:
                  name: headscale-config
                  key: ephemeral_node_inactivity_timeout
            - name: DB_TYPE
              value: postgres
            - name: DB_HOST
              value: postgres.headscale.svc.cluster.local
            - name: DB_PORT
              value: "5432"
            - name: DB_USER
              value: headscale
            - name: DB_PASS
              valueFrom:
                secretKeyRef:
                  name: postgresql
                  key: password
            - name: DB_NAME
              value: headscale
          ports:
            - name: http
              protocol: TCP
              containerPort: 8080
          livenessProbe:
            tcpSocket:
              port: http
            initialDelaySeconds: 30
            timeoutSeconds: 5
            periodSeconds: 15
          volumeMounts:
            - name: config
              mountPath: /vol/config
            - name: secret
              mountPath: /vol/secret
            - name: etc
              mountPath: /etc/headscale
      volumes:
        - name: config
          configMap:
            name: headscale-site
        - name: etc
          configMap:
            name: headscale-etc
        - name: secret
          secret:
            secretName: headscale
@@ -1,13 +0,0 @@
namespace: headscale
bases:
  - ../base
resources:
  - deployment.yaml
  - postgres-service.yaml
  - postgres-statefulset.yaml
generatorOptions:
  disableNameSuffixHash: true
secretGenerator:
  - name: postgresql
    files:
      - secrets/password
@@ -1,13 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: postgres
  labels:
    app: postgres
spec:
  selector:
    app: postgres
  ports:
    - name: postgres
      targetPort: postgres
      port: 5432
@@ -1,49 +0,0 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: postgres
spec:
  serviceName: postgres
  replicas: 1
  selector:
    matchLabels:
      app: postgres
  template:
    metadata:
      labels:
        app: postgres
    spec:
      containers:
        - name: postgres
          image: "postgres:13"
          imagePullPolicy: IfNotPresent
          env:
            - name: POSTGRES_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: postgresql
                  key: password
            - name: POSTGRES_USER
              value: headscale
          ports:
            - name: postgres
              protocol: TCP
              containerPort: 5432
          livenessProbe:
            tcpSocket:
              port: 5432
            initialDelaySeconds: 30
            timeoutSeconds: 5
            periodSeconds: 15
          volumeMounts:
            - name: pgdata
              mountPath: /var/lib/postgresql/data
  volumeClaimTemplates:
    - metadata:
        name: pgdata
      spec:
        storageClassName: local-path
        accessModes: ["ReadWriteOnce"]
        resources:
          requests:
            storage: 1Gi
@@ -1,11 +0,0 @@
kind: Ingress
metadata:
  name: headscale
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt-production
    traefik.ingress.kubernetes.io/router.tls: "true"
spec:
  tls:
    - hosts:
        - $(PUBLIC_HOSTNAME)
      secretName: production-cert
@@ -1,9 +0,0 @@
namespace: headscale
bases:
  - ../base
resources:
  - production-issuer.yaml
patches:
  - path: ingress-patch.yaml
    target:
      kind: Ingress
@@ -1,16 +0,0 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt-production
spec:
  acme:
    # TODO: figure out how to get kustomize to interpolate this, or use a transformer
    #email: $(CONTACT_EMAIL)
    server: https://acme-v02.api.letsencrypt.org/directory
    privateKeySecretRef:
      # Secret resource used to store the account's private key.
      name: letsencrypt-production-acc-key
    solvers:
      - http01:
          ingress:
            class: traefik
@@ -1,5 +0,0 @@
namespace: headscale
bases:
  - ../base
resources:
  - statefulset.yaml
@@ -1,82 +0,0 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: headscale
spec:
  serviceName: headscale
  replicas: 1
  selector:
    matchLabels:
      app: headscale
  template:
    metadata:
      labels:
        app: headscale
    spec:
      containers:
        - name: headscale
          image: "headscale:latest"
          imagePullPolicy: IfNotPresent
          command: ["/go/bin/headscale", "serve"]
          env:
            - name: SERVER_URL
              value: $(PUBLIC_PROTO)://$(PUBLIC_HOSTNAME)
            - name: LISTEN_ADDR
              valueFrom:
                configMapKeyRef:
                  name: headscale-config
                  key: listen_addr
            - name: METRICS_LISTEN_ADDR
              valueFrom:
                configMapKeyRef:
                  name: headscale-config
                  key: metrics_listen_addr
            - name: DERP_MAP_PATH
              value: /vol/config/derp.yaml
            - name: EPHEMERAL_NODE_INACTIVITY_TIMEOUT
              valueFrom:
                configMapKeyRef:
                  name: headscale-config
                  key: ephemeral_node_inactivity_timeout
            - name: DB_TYPE
              value: sqlite3
            - name: DB_PATH
              value: /vol/data/db.sqlite
          ports:
            - name: http
              protocol: TCP
              containerPort: 8080
          livenessProbe:
            tcpSocket:
              port: http
            initialDelaySeconds: 30
            timeoutSeconds: 5
            periodSeconds: 15
          volumeMounts:
            - name: config
              mountPath: /vol/config
            - name: data
              mountPath: /vol/data
            - name: secret
              mountPath: /vol/secret
            - name: etc
              mountPath: /etc/headscale
      volumes:
        - name: config
          configMap:
            name: headscale-site
        - name: etc
          configMap:
            name: headscale-etc
        - name: secret
          secret:
            secretName: headscale
  volumeClaimTemplates:
    - metadata:
        name: data
      spec:
        storageClassName: local-path
        accessModes: ["ReadWriteOnce"]
        resources:
          requests:
            storage: 1Gi
@@ -1,11 +0,0 @@
kind: Ingress
metadata:
  name: headscale
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt-staging
    traefik.ingress.kubernetes.io/router.tls: "true"
spec:
  tls:
    - hosts:
        - $(PUBLIC_HOSTNAME)
      secretName: staging-cert
@@ -1,9 +0,0 @@
namespace: headscale
bases:
  - ../base
resources:
  - staging-issuer.yaml
patches:
  - path: ingress-patch.yaml
    target:
      kind: Ingress
@@ -1,16 +0,0 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt-staging
spec:
  acme:
    # TODO: figure out how to get kustomize to interpolate this, or use a transformer
    #email: $(CONTACT_EMAIL)
    server: https://acme-staging-v02.api.letsencrypt.org/directory
    privateKeySecretRef:
      # Secret resource used to store the account's private key.
      name: letsencrypt-staging-acc-key
    solvers:
      - http01:
          ingress:
            class: traefik
@@ -20,11 +20,11 @@
    },
    "nixpkgs": {
      "locked": {
        "lastModified": 1710534455,
        "narHash": "sha256-huQT4Xs0y4EeFKn2BTBVYgEwJSv8SDlm82uWgMnCMmI=",
        "lastModified": 1716062047,
        "narHash": "sha256-OhysviwHQz4p2HZL4g7XGMLoUbWMjkMr/ogaR3VUYNA=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "9af9c1c87ed3e3ed271934cb896e0cdd33dae212",
        "rev": "02923630b89aa1ab36ef8e422501a6f4fd4b2016",
        "type": "github"
      },
      "original": {
@@ -31,7 +31,7 @@

    # When updating go.mod or go.sum, a new sha will need to be calculated,
    # update this if you have a mismatch after doing a change to those files.
    vendorHash = "sha256-z3IXmr8SK8oUJTnw7gTok6zpLf15kE89q6zYKbMA5AI=";
    vendorHash = "sha256-EorT2AVwA3usly/LcNor6r5UIhLCdj3L4O4ilgTIC2o=";

    subPackages = ["cmd/headscale"];
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.32.0
// protoc-gen-go v1.33.0
// protoc (unknown)
// source: headscale/v1/apikey.proto
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.32.0
// protoc-gen-go v1.33.0
// protoc (unknown)
// source: headscale/v1/device.proto
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.32.0
// protoc-gen-go v1.33.0
// protoc (unknown)
// source: headscale/v1/headscale.proto
@ -36,7 +36,7 @@ var file_headscale_v1_headscale_proto_rawDesc = []byte{
|
|||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73,
|
||||
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c,
|
||||
0x65, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x70, 0x69, 0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x32, 0xfd, 0x17, 0x0a, 0x10, 0x48, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x53,
|
||||
0x6f, 0x32, 0x80, 0x19, 0x0a, 0x10, 0x48, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x53,
|
||||
0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x63, 0x0a, 0x07, 0x47, 0x65, 0x74, 0x55, 0x73, 0x65,
|
||||
0x72, 0x12, 0x1c, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31,
|
||||
0x2e, 0x47, 0x65, 0x74, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
|
||||
|
@ -161,77 +161,85 @@ var file_headscale_v1_headscale_proto_rawDesc = []byte{
|
|||
0x76, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x23,
|
||||
0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1d, 0x22, 0x1b, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f,
|
||||
0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x7b, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x75,
|
||||
0x73, 0x65, 0x72, 0x12, 0x64, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73,
|
||||
0x12, 0x1e, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e,
|
||||
0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
|
||||
0x1a, 0x1f, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e,
|
||||
0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
|
||||
0x65, 0x22, 0x16, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x10, 0x12, 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f,
|
||||
0x76, 0x31, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x7c, 0x0a, 0x0b, 0x45, 0x6e, 0x61,
|
||||
0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x20, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73,
|
||||
0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f,
|
||||
0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x68, 0x65, 0x61,
|
||||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65,
|
||||
0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x28, 0x82,
|
||||
0xd3, 0xe4, 0x93, 0x02, 0x22, 0x22, 0x20, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x72,
|
||||
0x6f, 0x75, 0x74, 0x65, 0x73, 0x2f, 0x7b, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x69, 0x64, 0x7d,
|
||||
0x2f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x80, 0x01, 0x0a, 0x0c, 0x44, 0x69, 0x73, 0x61,
|
||||
0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73,
|
||||
0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x52,
|
||||
0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x68, 0x65,
|
||||
0x73, 0x65, 0x72, 0x12, 0x80, 0x01, 0x0a, 0x0f, 0x42, 0x61, 0x63, 0x6b, 0x66, 0x69, 0x6c, 0x6c,
|
||||
0x4e, 0x6f, 0x64, 0x65, 0x49, 0x50, 0x73, 0x12, 0x24, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63,
|
||||
0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x66, 0x69, 0x6c, 0x6c, 0x4e,
|
||||
0x6f, 0x64, 0x65, 0x49, 0x50, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e,
|
||||
0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x63,
|
||||
0x6b, 0x66, 0x69, 0x6c, 0x6c, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x50, 0x73, 0x52, 0x65, 0x73, 0x70,
|
||||
0x6f, 0x6e, 0x73, 0x65, 0x22, 0x20, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1a, 0x22, 0x18, 0x2f, 0x61,
|
||||
0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x66,
|
||||
0x69, 0x6c, 0x6c, 0x69, 0x70, 0x73, 0x12, 0x64, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75,
|
||||
0x74, 0x65, 0x73, 0x12, 0x1e, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e,
|
||||
0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75,
|
||||
0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e,
|
||||
0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70,
|
||||
0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x10, 0x12, 0x0e, 0x2f, 0x61,
|
||||
0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x7c, 0x0a, 0x0b,
|
||||
0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x20, 0x2e, 0x68, 0x65,
|
||||
0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x61, 0x62, 0x6c,
|
||||
0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e,
|
||||
0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x61,
|
||||
0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
|
||||
0x22, 0x28, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x22, 0x22, 0x20, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76,
|
||||
0x31, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x2f, 0x7b, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f,
|
||||
0x69, 0x64, 0x7d, 0x2f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x80, 0x01, 0x0a, 0x0c, 0x44,
|
||||
0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x21, 0x2e, 0x68, 0x65,
|
||||
0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x69, 0x73, 0x61, 0x62,
|
||||
0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
|
||||
0x29, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x22, 0x21, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31,
|
||||
0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x2f, 0x7b, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x69,
|
||||
0x64, 0x7d, 0x2f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x7f, 0x0a, 0x0d, 0x47, 0x65,
|
||||
0x74, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x22, 0x2e, 0x68, 0x65,
|
||||
0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f,
|
||||
0x64, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
|
||||
0x23, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47,
|
||||
0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70,
|
||||
0x6f, 0x6e, 0x73, 0x65, 0x22, 0x25, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1f, 0x12, 0x1d, 0x2f, 0x61,
|
||||
0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x7b, 0x6e, 0x6f, 0x64, 0x65,
|
||||
0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x75, 0x0a, 0x0b, 0x44,
|
||||
0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x20, 0x2e, 0x68, 0x65, 0x61,
|
||||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65,
|
||||
0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x68,
|
||||
0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65,
|
||||
0x74, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
|
||||
0x21, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1b, 0x2a, 0x19, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31,
|
||||
0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x2f, 0x7b, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x69,
|
||||
0x64, 0x7d, 0x12, 0x70, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b,
|
||||
0x65, 0x79, 0x12, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76,
|
||||
0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65,
|
||||
0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c,
|
||||
0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22,
|
||||
0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x69,
|
||||
0x73, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
|
||||
0x73, 0x65, 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x22, 0x21, 0x2f, 0x61, 0x70, 0x69,
|
||||
0x2f, 0x76, 0x31, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x2f, 0x7b, 0x72, 0x6f, 0x75, 0x74,
|
||||
0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x7f, 0x0a,
|
||||
0x0d, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x22,
|
||||
0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65,
|
||||
0x74, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
|
||||
0x73, 0x74, 0x1a, 0x23, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76,
|
||||
0x31, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x52,
|
||||
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x25, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1f, 0x12,
|
||||
0x1d, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x7b, 0x6e,
|
||||
0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x75,
|
||||
0x0a, 0x0b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x20, 0x2e,
|
||||
0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c,
|
||||
0x65, 0x74, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
|
||||
0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44,
|
||||
0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
|
||||
0x73, 0x65, 0x22, 0x21, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1b, 0x2a, 0x19, 0x2f, 0x61, 0x70, 0x69,
|
||||
0x2f, 0x76, 0x31, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x2f, 0x7b, 0x72, 0x6f, 0x75, 0x74,
|
||||
0x65, 0x5f, 0x69, 0x64, 0x7d, 0x12, 0x70, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41,
|
||||
0x70, 0x69, 0x4b, 0x65, 0x79, 0x12, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c,
|
||||
0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65,
|
||||
0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x19, 0x82, 0xd3, 0xe4, 0x93, 0x02,
|
||||
0x13, 0x3a, 0x01, 0x2a, 0x22, 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x70,
|
||||
0x69, 0x6b, 0x65, 0x79, 0x12, 0x77, 0x0a, 0x0c, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x41, 0x70,
|
||||
0x69, 0x4b, 0x65, 0x79, 0x12, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65,
|
||||
0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79,
|
||||
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63,
|
||||
0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73,
|
||||
0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x70,
|
||||
0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x19, 0x82, 0xd3,
|
||||
0xe4, 0x93, 0x02, 0x13, 0x3a, 0x01, 0x2a, 0x22, 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31,
|
||||
0x2f, 0x61, 0x70, 0x69, 0x6b, 0x65, 0x79, 0x12, 0x77, 0x0a, 0x0c, 0x45, 0x78, 0x70, 0x69, 0x72,
|
||||
0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x12, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63,
|
||||
0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x41, 0x70, 0x69,
|
||||
0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x20, 0x82, 0xd3, 0xe4,
|
||||
0x93, 0x02, 0x1a, 0x3a, 0x01, 0x2a, 0x22, 0x15, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f,
|
||||
0x61, 0x70, 0x69, 0x6b, 0x65, 0x79, 0x2f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x12, 0x6a, 0x0a,
|
||||
0x0b, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x20, 0x2e, 0x68,
|
||||
0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74,
|
||||
0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21,
|
||||
0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69,
|
||||
0x73, 0x74, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
|
||||
0x65, 0x22, 0x16, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x10, 0x12, 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f,
|
||||
0x76, 0x31, 0x2f, 0x61, 0x70, 0x69, 0x6b, 0x65, 0x79, 0x12, 0x76, 0x0a, 0x0c, 0x44, 0x65, 0x6c,
|
||||
0x65, 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x12, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64,
|
||||
0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41,
|
||||
0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x68,
|
||||
0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x68, 0x65, 0x61,
|
||||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65,
|
||||
0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x20,
|
||||
0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1a, 0x3a, 0x01, 0x2a, 0x22, 0x15, 0x2f, 0x61, 0x70, 0x69, 0x2f,
|
||||
0x76, 0x31, 0x2f, 0x61, 0x70, 0x69, 0x6b, 0x65, 0x79, 0x2f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65,
|
||||
0x12, 0x6a, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x73, 0x12,
|
||||
0x20, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c,
|
||||
0x69, 0x73, 0x74, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
|
||||
0x74, 0x1a, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31,
|
||||
0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70,
|
||||
0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x10, 0x12, 0x0e, 0x2f, 0x61,
|
||||
0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x70, 0x69, 0x6b, 0x65, 0x79, 0x12, 0x76, 0x0a, 0x0c,
|
||||
0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x12, 0x21, 0x2e, 0x68,
|
||||
0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65,
|
||||
0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
|
||||
0x22, 0x1f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x19, 0x2a, 0x17, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76,
|
||||
0x31, 0x2f, 0x61, 0x70, 0x69, 0x6b, 0x65, 0x79, 0x2f, 0x7b, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78,
|
||||
0x7d, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
|
||||
0x6a, 0x75, 0x61, 0x6e, 0x66, 0x6f, 0x6e, 0x74, 0x2f, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61,
|
||||
0x6c, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72,
|
||||
0x6f, 0x74, 0x6f, 0x33,
|
||||
0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
|
||||
0x22, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44,
|
||||
0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f,
|
||||
0x6e, 0x73, 0x65, 0x22, 0x1f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x19, 0x2a, 0x17, 0x2f, 0x61, 0x70,
|
||||
0x69, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x70, 0x69, 0x6b, 0x65, 0x79, 0x2f, 0x7b, 0x70, 0x72, 0x65,
|
||||
0x66, 0x69, 0x78, 0x7d, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63,
|
||||
0x6f, 0x6d, 0x2f, 0x6a, 0x75, 0x61, 0x6e, 0x66, 0x6f, 0x6e, 0x74, 0x2f, 0x68, 0x65, 0x61, 0x64,
|
||||
0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x31, 0x62,
|
||||
0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var file_headscale_v1_headscale_proto_goTypes = []interface{}{
|
||||
|
@ -252,41 +260,43 @@ var file_headscale_v1_headscale_proto_goTypes = []interface{}{
|
|||
(*RenameNodeRequest)(nil), // 14: headscale.v1.RenameNodeRequest
|
||||
(*ListNodesRequest)(nil), // 15: headscale.v1.ListNodesRequest
|
||||
(*MoveNodeRequest)(nil), // 16: headscale.v1.MoveNodeRequest
|
||||
(*GetRoutesRequest)(nil), // 17: headscale.v1.GetRoutesRequest
|
||||
(*EnableRouteRequest)(nil), // 18: headscale.v1.EnableRouteRequest
|
||||
(*DisableRouteRequest)(nil), // 19: headscale.v1.DisableRouteRequest
|
||||
(*GetNodeRoutesRequest)(nil), // 20: headscale.v1.GetNodeRoutesRequest
|
||||
(*DeleteRouteRequest)(nil), // 21: headscale.v1.DeleteRouteRequest
|
||||
(*CreateApiKeyRequest)(nil), // 22: headscale.v1.CreateApiKeyRequest
|
||||
(*ExpireApiKeyRequest)(nil), // 23: headscale.v1.ExpireApiKeyRequest
|
||||
(*ListApiKeysRequest)(nil), // 24: headscale.v1.ListApiKeysRequest
|
||||
(*DeleteApiKeyRequest)(nil), // 25: headscale.v1.DeleteApiKeyRequest
|
||||
(*GetUserResponse)(nil), // 26: headscale.v1.GetUserResponse
|
||||
(*CreateUserResponse)(nil), // 27: headscale.v1.CreateUserResponse
|
||||
(*RenameUserResponse)(nil), // 28: headscale.v1.RenameUserResponse
|
||||
(*DeleteUserResponse)(nil), // 29: headscale.v1.DeleteUserResponse
|
||||
(*ListUsersResponse)(nil), // 30: headscale.v1.ListUsersResponse
|
||||
(*CreatePreAuthKeyResponse)(nil), // 31: headscale.v1.CreatePreAuthKeyResponse
|
||||
(*ExpirePreAuthKeyResponse)(nil), // 32: headscale.v1.ExpirePreAuthKeyResponse
|
||||
(*ListPreAuthKeysResponse)(nil), // 33: headscale.v1.ListPreAuthKeysResponse
|
||||
(*DebugCreateNodeResponse)(nil), // 34: headscale.v1.DebugCreateNodeResponse
|
||||
(*GetNodeResponse)(nil), // 35: headscale.v1.GetNodeResponse
|
||||
(*SetTagsResponse)(nil), // 36: headscale.v1.SetTagsResponse
|
||||
(*RegisterNodeResponse)(nil), // 37: headscale.v1.RegisterNodeResponse
|
||||
(*DeleteNodeResponse)(nil), // 38: headscale.v1.DeleteNodeResponse
|
||||
(*ExpireNodeResponse)(nil), // 39: headscale.v1.ExpireNodeResponse
|
||||
(*RenameNodeResponse)(nil), // 40: headscale.v1.RenameNodeResponse
|
||||
(*ListNodesResponse)(nil), // 41: headscale.v1.ListNodesResponse
|
||||
(*MoveNodeResponse)(nil), // 42: headscale.v1.MoveNodeResponse
|
||||
(*GetRoutesResponse)(nil), // 43: headscale.v1.GetRoutesResponse
|
||||
(*EnableRouteResponse)(nil), // 44: headscale.v1.EnableRouteResponse
|
||||
(*DisableRouteResponse)(nil), // 45: headscale.v1.DisableRouteResponse
|
||||
(*GetNodeRoutesResponse)(nil), // 46: headscale.v1.GetNodeRoutesResponse
|
||||
(*DeleteRouteResponse)(nil), // 47: headscale.v1.DeleteRouteResponse
|
||||
(*CreateApiKeyResponse)(nil), // 48: headscale.v1.CreateApiKeyResponse
|
||||
(*ExpireApiKeyResponse)(nil), // 49: headscale.v1.ExpireApiKeyResponse
|
||||
(*ListApiKeysResponse)(nil), // 50: headscale.v1.ListApiKeysResponse
|
||||
(*DeleteApiKeyResponse)(nil), // 51: headscale.v1.DeleteApiKeyResponse
|
||||
(*BackfillNodeIPsRequest)(nil), // 17: headscale.v1.BackfillNodeIPsRequest
|
||||
(*GetRoutesRequest)(nil), // 18: headscale.v1.GetRoutesRequest
|
||||
(*EnableRouteRequest)(nil), // 19: headscale.v1.EnableRouteRequest
|
||||
(*DisableRouteRequest)(nil), // 20: headscale.v1.DisableRouteRequest
|
||||
(*GetNodeRoutesRequest)(nil), // 21: headscale.v1.GetNodeRoutesRequest
|
||||
(*DeleteRouteRequest)(nil), // 22: headscale.v1.DeleteRouteRequest
|
||||
(*CreateApiKeyRequest)(nil), // 23: headscale.v1.CreateApiKeyRequest
|
||||
(*ExpireApiKeyRequest)(nil), // 24: headscale.v1.ExpireApiKeyRequest
|
||||
(*ListApiKeysRequest)(nil), // 25: headscale.v1.ListApiKeysRequest
|
||||
(*DeleteApiKeyRequest)(nil), // 26: headscale.v1.DeleteApiKeyRequest
|
||||
(*GetUserResponse)(nil), // 27: headscale.v1.GetUserResponse
|
||||
(*CreateUserResponse)(nil), // 28: headscale.v1.CreateUserResponse
|
||||
(*RenameUserResponse)(nil), // 29: headscale.v1.RenameUserResponse
|
||||
(*DeleteUserResponse)(nil), // 30: headscale.v1.DeleteUserResponse
|
||||
(*ListUsersResponse)(nil), // 31: headscale.v1.ListUsersResponse
|
||||
(*CreatePreAuthKeyResponse)(nil), // 32: headscale.v1.CreatePreAuthKeyResponse
|
||||
(*ExpirePreAuthKeyResponse)(nil), // 33: headscale.v1.ExpirePreAuthKeyResponse
|
||||
(*ListPreAuthKeysResponse)(nil), // 34: headscale.v1.ListPreAuthKeysResponse
|
||||
(*DebugCreateNodeResponse)(nil), // 35: headscale.v1.DebugCreateNodeResponse
|
||||
(*GetNodeResponse)(nil), // 36: headscale.v1.GetNodeResponse
|
||||
(*SetTagsResponse)(nil), // 37: headscale.v1.SetTagsResponse
|
||||
(*RegisterNodeResponse)(nil), // 38: headscale.v1.RegisterNodeResponse
|
||||
(*DeleteNodeResponse)(nil), // 39: headscale.v1.DeleteNodeResponse
|
||||
(*ExpireNodeResponse)(nil), // 40: headscale.v1.ExpireNodeResponse
|
||||
(*RenameNodeResponse)(nil), // 41: headscale.v1.RenameNodeResponse
|
||||
(*ListNodesResponse)(nil), // 42: headscale.v1.ListNodesResponse
|
||||
(*MoveNodeResponse)(nil), // 43: headscale.v1.MoveNodeResponse
|
||||
(*BackfillNodeIPsResponse)(nil), // 44: headscale.v1.BackfillNodeIPsResponse
|
||||
(*GetRoutesResponse)(nil), // 45: headscale.v1.GetRoutesResponse
|
||||
(*EnableRouteResponse)(nil), // 46: headscale.v1.EnableRouteResponse
|
||||
(*DisableRouteResponse)(nil), // 47: headscale.v1.DisableRouteResponse
|
||||
(*GetNodeRoutesResponse)(nil), // 48: headscale.v1.GetNodeRoutesResponse
|
||||
(*DeleteRouteResponse)(nil), // 49: headscale.v1.DeleteRouteResponse
|
||||
(*CreateApiKeyResponse)(nil), // 50: headscale.v1.CreateApiKeyResponse
|
||||
(*ExpireApiKeyResponse)(nil), // 51: headscale.v1.ExpireApiKeyResponse
|
||||
(*ListApiKeysResponse)(nil), // 52: headscale.v1.ListApiKeysResponse
|
||||
(*DeleteApiKeyResponse)(nil), // 53: headscale.v1.DeleteApiKeyResponse
|
||||
}
|
||||
var file_headscale_v1_headscale_proto_depIdxs = []int32{
|
||||
0, // 0: headscale.v1.HeadscaleService.GetUser:input_type -> headscale.v1.GetUserRequest
|
||||
|
@ -306,43 +316,45 @@ var file_headscale_v1_headscale_proto_depIdxs = []int32{
|
|||
14, // 14: headscale.v1.HeadscaleService.RenameNode:input_type -> headscale.v1.RenameNodeRequest
|
||||
15, // 15: headscale.v1.HeadscaleService.ListNodes:input_type -> headscale.v1.ListNodesRequest
|
||||
16, // 16: headscale.v1.HeadscaleService.MoveNode:input_type -> headscale.v1.MoveNodeRequest
|
||||
17, // 17: headscale.v1.HeadscaleService.GetRoutes:input_type -> headscale.v1.GetRoutesRequest
|
||||
18, // 18: headscale.v1.HeadscaleService.EnableRoute:input_type -> headscale.v1.EnableRouteRequest
|
||||
19, // 19: headscale.v1.HeadscaleService.DisableRoute:input_type -> headscale.v1.DisableRouteRequest
|
||||
20, // 20: headscale.v1.HeadscaleService.GetNodeRoutes:input_type -> headscale.v1.GetNodeRoutesRequest
|
||||
21, // 21: headscale.v1.HeadscaleService.DeleteRoute:input_type -> headscale.v1.DeleteRouteRequest
|
||||
22, // 22: headscale.v1.HeadscaleService.CreateApiKey:input_type -> headscale.v1.CreateApiKeyRequest
|
||||
23, // 23: headscale.v1.HeadscaleService.ExpireApiKey:input_type -> headscale.v1.ExpireApiKeyRequest
|
||||
24, // 24: headscale.v1.HeadscaleService.ListApiKeys:input_type -> headscale.v1.ListApiKeysRequest
|
||||
25, // 25: headscale.v1.HeadscaleService.DeleteApiKey:input_type -> headscale.v1.DeleteApiKeyRequest
|
||||
26, // 26: headscale.v1.HeadscaleService.GetUser:output_type -> headscale.v1.GetUserResponse
|
||||
27, // 27: headscale.v1.HeadscaleService.CreateUser:output_type -> headscale.v1.CreateUserResponse
|
||||
28, // 28: headscale.v1.HeadscaleService.RenameUser:output_type -> headscale.v1.RenameUserResponse
|
||||
29, // 29: headscale.v1.HeadscaleService.DeleteUser:output_type -> headscale.v1.DeleteUserResponse
|
||||
30, // 30: headscale.v1.HeadscaleService.ListUsers:output_type -> headscale.v1.ListUsersResponse
|
||||
31, // 31: headscale.v1.HeadscaleService.CreatePreAuthKey:output_type -> headscale.v1.CreatePreAuthKeyResponse
|
||||
32, // 32: headscale.v1.HeadscaleService.ExpirePreAuthKey:output_type -> headscale.v1.ExpirePreAuthKeyResponse
|
||||
33, // 33: headscale.v1.HeadscaleService.ListPreAuthKeys:output_type -> headscale.v1.ListPreAuthKeysResponse
|
||||
34, // 34: headscale.v1.HeadscaleService.DebugCreateNode:output_type -> headscale.v1.DebugCreateNodeResponse
|
||||
35, // 35: headscale.v1.HeadscaleService.GetNode:output_type -> headscale.v1.GetNodeResponse
|
||||
36, // 36: headscale.v1.HeadscaleService.SetTags:output_type -> headscale.v1.SetTagsResponse
|
||||
37, // 37: headscale.v1.HeadscaleService.RegisterNode:output_type -> headscale.v1.RegisterNodeResponse
|
||||
38, // 38: headscale.v1.HeadscaleService.DeleteNode:output_type -> headscale.v1.DeleteNodeResponse
|
||||
39, // 39: headscale.v1.HeadscaleService.ExpireNode:output_type -> headscale.v1.ExpireNodeResponse
|
||||
40, // 40: headscale.v1.HeadscaleService.RenameNode:output_type -> headscale.v1.RenameNodeResponse
|
||||
41, // 41: headscale.v1.HeadscaleService.ListNodes:output_type -> headscale.v1.ListNodesResponse
|
||||
42, // 42: headscale.v1.HeadscaleService.MoveNode:output_type -> headscale.v1.MoveNodeResponse
|
||||
43, // 43: headscale.v1.HeadscaleService.GetRoutes:output_type -> headscale.v1.GetRoutesResponse
|
||||
44, // 44: headscale.v1.HeadscaleService.EnableRoute:output_type -> headscale.v1.EnableRouteResponse
|
||||
45, // 45: headscale.v1.HeadscaleService.DisableRoute:output_type -> headscale.v1.DisableRouteResponse
|
||||
46, // 46: headscale.v1.HeadscaleService.GetNodeRoutes:output_type -> headscale.v1.GetNodeRoutesResponse
|
||||
47, // 47: headscale.v1.HeadscaleService.DeleteRoute:output_type -> headscale.v1.DeleteRouteResponse
|
||||
48, // 48: headscale.v1.HeadscaleService.CreateApiKey:output_type -> headscale.v1.CreateApiKeyResponse
|
||||
49, // 49: headscale.v1.HeadscaleService.ExpireApiKey:output_type -> headscale.v1.ExpireApiKeyResponse
|
||||
50, // 50: headscale.v1.HeadscaleService.ListApiKeys:output_type -> headscale.v1.ListApiKeysResponse
|
||||
51, // 51: headscale.v1.HeadscaleService.DeleteApiKey:output_type -> headscale.v1.DeleteApiKeyResponse
|
||||
26, // [26:52] is the sub-list for method output_type
|
||||
0, // [0:26] is the sub-list for method input_type
|
||||
17, // 17: headscale.v1.HeadscaleService.BackfillNodeIPs:input_type -> headscale.v1.BackfillNodeIPsRequest
|
||||
18, // 18: headscale.v1.HeadscaleService.GetRoutes:input_type -> headscale.v1.GetRoutesRequest
|
||||
19, // 19: headscale.v1.HeadscaleService.EnableRoute:input_type -> headscale.v1.EnableRouteRequest
|
||||
20, // 20: headscale.v1.HeadscaleService.DisableRoute:input_type -> headscale.v1.DisableRouteRequest
|
||||
21, // 21: headscale.v1.HeadscaleService.GetNodeRoutes:input_type -> headscale.v1.GetNodeRoutesRequest
|
||||
22, // 22: headscale.v1.HeadscaleService.DeleteRoute:input_type -> headscale.v1.DeleteRouteRequest
|
||||
23, // 23: headscale.v1.HeadscaleService.CreateApiKey:input_type -> headscale.v1.CreateApiKeyRequest
|
||||
24, // 24: headscale.v1.HeadscaleService.ExpireApiKey:input_type -> headscale.v1.ExpireApiKeyRequest
|
||||
25, // 25: headscale.v1.HeadscaleService.ListApiKeys:input_type -> headscale.v1.ListApiKeysRequest
|
||||
26, // 26: headscale.v1.HeadscaleService.DeleteApiKey:input_type -> headscale.v1.DeleteApiKeyRequest
|
||||
27, // 27: headscale.v1.HeadscaleService.GetUser:output_type -> headscale.v1.GetUserResponse
|
||||
28, // 28: headscale.v1.HeadscaleService.CreateUser:output_type -> headscale.v1.CreateUserResponse
|
||||
29, // 29: headscale.v1.HeadscaleService.RenameUser:output_type -> headscale.v1.RenameUserResponse
|
||||
30, // 30: headscale.v1.HeadscaleService.DeleteUser:output_type -> headscale.v1.DeleteUserResponse
|
||||
31, // 31: headscale.v1.HeadscaleService.ListUsers:output_type -> headscale.v1.ListUsersResponse
|
||||
32, // 32: headscale.v1.HeadscaleService.CreatePreAuthKey:output_type -> headscale.v1.CreatePreAuthKeyResponse
|
||||
33, // 33: headscale.v1.HeadscaleService.ExpirePreAuthKey:output_type -> headscale.v1.ExpirePreAuthKeyResponse
|
||||
34, // 34: headscale.v1.HeadscaleService.ListPreAuthKeys:output_type -> headscale.v1.ListPreAuthKeysResponse
|
||||
35, // 35: headscale.v1.HeadscaleService.DebugCreateNode:output_type -> headscale.v1.DebugCreateNodeResponse
|
||||
36, // 36: headscale.v1.HeadscaleService.GetNode:output_type -> headscale.v1.GetNodeResponse
|
||||
37, // 37: headscale.v1.HeadscaleService.SetTags:output_type -> headscale.v1.SetTagsResponse
|
||||
38, // 38: headscale.v1.HeadscaleService.RegisterNode:output_type -> headscale.v1.RegisterNodeResponse
|
||||
39, // 39: headscale.v1.HeadscaleService.DeleteNode:output_type -> headscale.v1.DeleteNodeResponse
|
||||
40, // 40: headscale.v1.HeadscaleService.ExpireNode:output_type -> headscale.v1.ExpireNodeResponse
|
||||
41, // 41: headscale.v1.HeadscaleService.RenameNode:output_type -> headscale.v1.RenameNodeResponse
|
||||
42, // 42: headscale.v1.HeadscaleService.ListNodes:output_type -> headscale.v1.ListNodesResponse
|
||||
43, // 43: headscale.v1.HeadscaleService.MoveNode:output_type -> headscale.v1.MoveNodeResponse
|
||||
44, // 44: headscale.v1.HeadscaleService.BackfillNodeIPs:output_type -> headscale.v1.BackfillNodeIPsResponse
|
||||
45, // 45: headscale.v1.HeadscaleService.GetRoutes:output_type -> headscale.v1.GetRoutesResponse
|
||||
46, // 46: headscale.v1.HeadscaleService.EnableRoute:output_type -> headscale.v1.EnableRouteResponse
|
||||
47, // 47: headscale.v1.HeadscaleService.DisableRoute:output_type -> headscale.v1.DisableRouteResponse
|
||||
48, // 48: headscale.v1.HeadscaleService.GetNodeRoutes:output_type -> headscale.v1.GetNodeRoutesResponse
|
||||
49, // 49: headscale.v1.HeadscaleService.DeleteRoute:output_type -> headscale.v1.DeleteRouteResponse
|
||||
50, // 50: headscale.v1.HeadscaleService.CreateApiKey:output_type -> headscale.v1.CreateApiKeyResponse
|
||||
51, // 51: headscale.v1.HeadscaleService.ExpireApiKey:output_type -> headscale.v1.ExpireApiKeyResponse
|
||||
52, // 52: headscale.v1.HeadscaleService.ListApiKeys:output_type -> headscale.v1.ListApiKeysResponse
|
||||
53, // 53: headscale.v1.HeadscaleService.DeleteApiKey:output_type -> headscale.v1.DeleteApiKeyResponse
|
||||
27, // [27:54] is the sub-list for method output_type
|
||||
0, // [0:27] is the sub-list for method input_type
|
||||
0, // [0:0] is the sub-list for extension type_name
|
||||
0, // [0:0] is the sub-list for extension extendee
|
||||
0, // [0:0] is the sub-list for field type_name
|
||||
|
|
|
@ -795,6 +795,42 @@ func local_request_HeadscaleService_MoveNode_0(ctx context.Context, marshaler ru
|
|||
|
||||
}
|
||||
|
||||
var (
|
||||
filter_HeadscaleService_BackfillNodeIPs_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
|
||||
)
|
||||
|
||||
func request_HeadscaleService_BackfillNodeIPs_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq BackfillNodeIPsRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
if err := req.ParseForm(); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_HeadscaleService_BackfillNodeIPs_0); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
|
||||
msg, err := client.BackfillNodeIPs(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
func local_request_HeadscaleService_BackfillNodeIPs_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq BackfillNodeIPsRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
if err := req.ParseForm(); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_HeadscaleService_BackfillNodeIPs_0); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
|
||||
msg, err := server.BackfillNodeIPs(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
func request_HeadscaleService_GetRoutes_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq GetRoutesRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
@ -1574,6 +1610,31 @@ func RegisterHeadscaleServiceHandlerServer(ctx context.Context, mux *runtime.Ser
|
|||
|
||||
})
|
||||
|
||||
mux.Handle("POST", pattern_HeadscaleService_BackfillNodeIPs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
var err error
|
||||
var annotatedContext context.Context
|
||||
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/headscale.v1.HeadscaleService/BackfillNodeIPs", runtime.WithHTTPPathPattern("/api/v1/node/backfillips"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_HeadscaleService_BackfillNodeIPs_0(annotatedContext, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_HeadscaleService_BackfillNodeIPs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
mux.Handle("GET", pattern_HeadscaleService_GetRoutes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
|
@ -2214,6 +2275,28 @@ func RegisterHeadscaleServiceHandlerClient(ctx context.Context, mux *runtime.Ser
|
|||
|
||||
})
|
||||
|
||||
mux.Handle("POST", pattern_HeadscaleService_BackfillNodeIPs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
var err error
|
||||
var annotatedContext context.Context
|
||||
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/headscale.v1.HeadscaleService/BackfillNodeIPs", runtime.WithHTTPPathPattern("/api/v1/node/backfillips"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := request_HeadscaleService_BackfillNodeIPs_0(annotatedContext, inboundMarshaler, client, req, pathParams)
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_HeadscaleService_BackfillNodeIPs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
mux.Handle("GET", pattern_HeadscaleService_GetRoutes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
|
@@ -2450,6 +2533,8 @@ var (

	pattern_HeadscaleService_MoveNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"api", "v1", "node", "node_id", "user"}, ""))

	pattern_HeadscaleService_BackfillNodeIPs_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v1", "node", "backfillips"}, ""))

	pattern_HeadscaleService_GetRoutes_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "routes"}, ""))

	pattern_HeadscaleService_EnableRoute_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"api", "v1", "routes", "route_id", "enable"}, ""))
@@ -2504,6 +2589,8 @@ var (

	forward_HeadscaleService_MoveNode_0 = runtime.ForwardResponseMessage

	forward_HeadscaleService_BackfillNodeIPs_0 = runtime.ForwardResponseMessage

	forward_HeadscaleService_GetRoutes_0 = runtime.ForwardResponseMessage

	forward_HeadscaleService_EnableRoute_0 = runtime.ForwardResponseMessage
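The route pattern above maps POST /api/v1/node/backfillips onto the new RPC, and the gateway handler fills BackfillNodeIPsRequest from query parameters. A rough sketch of calling that endpoint over plain HTTP follows; the listen address and the Bearer-token header are assumptions about a typical headscale deployment, not something taken from this diff.

package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	// Assumed: the HTTP listener is on 127.0.0.1:8080 and the API accepts
	// an API key as a Bearer token; adjust both for a real setup.
	const apiKey = "YOUR_API_KEY"

	// confirmed travels as a query parameter because the generated handler
	// populates the request message from the parsed form values.
	req, err := http.NewRequest(http.MethodPost,
		"http://127.0.0.1:8080/api/v1/node/backfillips?confirmed=true", nil)
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Authorization", "Bearer "+apiKey)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.Status, string(body))
}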
@@ -36,6 +36,7 @@ const (
	HeadscaleService_RenameNode_FullMethodName = "/headscale.v1.HeadscaleService/RenameNode"
	HeadscaleService_ListNodes_FullMethodName = "/headscale.v1.HeadscaleService/ListNodes"
	HeadscaleService_MoveNode_FullMethodName = "/headscale.v1.HeadscaleService/MoveNode"
	HeadscaleService_BackfillNodeIPs_FullMethodName = "/headscale.v1.HeadscaleService/BackfillNodeIPs"
	HeadscaleService_GetRoutes_FullMethodName = "/headscale.v1.HeadscaleService/GetRoutes"
	HeadscaleService_EnableRoute_FullMethodName = "/headscale.v1.HeadscaleService/EnableRoute"
	HeadscaleService_DisableRoute_FullMethodName = "/headscale.v1.HeadscaleService/DisableRoute"
@@ -71,6 +72,7 @@ type HeadscaleServiceClient interface {
	RenameNode(ctx context.Context, in *RenameNodeRequest, opts ...grpc.CallOption) (*RenameNodeResponse, error)
	ListNodes(ctx context.Context, in *ListNodesRequest, opts ...grpc.CallOption) (*ListNodesResponse, error)
	MoveNode(ctx context.Context, in *MoveNodeRequest, opts ...grpc.CallOption) (*MoveNodeResponse, error)
	BackfillNodeIPs(ctx context.Context, in *BackfillNodeIPsRequest, opts ...grpc.CallOption) (*BackfillNodeIPsResponse, error)
	// --- Route start ---
	GetRoutes(ctx context.Context, in *GetRoutesRequest, opts ...grpc.CallOption) (*GetRoutesResponse, error)
	EnableRoute(ctx context.Context, in *EnableRouteRequest, opts ...grpc.CallOption) (*EnableRouteResponse, error)
@@ -245,6 +247,15 @@ func (c *headscaleServiceClient) MoveNode(ctx context.Context, in *MoveNodeReque
	return out, nil
}

func (c *headscaleServiceClient) BackfillNodeIPs(ctx context.Context, in *BackfillNodeIPsRequest, opts ...grpc.CallOption) (*BackfillNodeIPsResponse, error) {
	out := new(BackfillNodeIPsResponse)
	err := c.cc.Invoke(ctx, HeadscaleService_BackfillNodeIPs_FullMethodName, in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *headscaleServiceClient) GetRoutes(ctx context.Context, in *GetRoutesRequest, opts ...grpc.CallOption) (*GetRoutesResponse, error) {
	out := new(GetRoutesResponse)
	err := c.cc.Invoke(ctx, HeadscaleService_GetRoutes_FullMethodName, in, out, opts...)
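The generated HeadscaleServiceClient above now exposes BackfillNodeIPs directly. A minimal sketch of driving it from Go follows; the module import path, the gRPC listen address, the plaintext transport and the dry-run reading of the confirmed flag are assumptions for illustration, not part of this change.

package main

import (
	"context"
	"fmt"
	"log"

	v1 "github.com/juanfont/headscale/gen/go/headscale/v1" // assumed import path

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Assumed: headscale's gRPC endpoint is reachable on localhost:50443;
	// authentication metadata is omitted here for brevity.
	conn, err := grpc.Dial("localhost:50443",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := v1.NewHeadscaleServiceClient(conn)

	// Confirmed left false, presumably a dry run; set it to true to apply.
	resp, err := client.BackfillNodeIPs(context.Background(),
		&v1.BackfillNodeIPsRequest{Confirmed: false})
	if err != nil {
		log.Fatal(err)
	}
	for _, change := range resp.GetChanges() {
		fmt.Println(change)
	}
}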
@@ -350,6 +361,7 @@ type HeadscaleServiceServer interface {
	RenameNode(context.Context, *RenameNodeRequest) (*RenameNodeResponse, error)
	ListNodes(context.Context, *ListNodesRequest) (*ListNodesResponse, error)
	MoveNode(context.Context, *MoveNodeRequest) (*MoveNodeResponse, error)
	BackfillNodeIPs(context.Context, *BackfillNodeIPsRequest) (*BackfillNodeIPsResponse, error)
	// --- Route start ---
	GetRoutes(context.Context, *GetRoutesRequest) (*GetRoutesResponse, error)
	EnableRoute(context.Context, *EnableRouteRequest) (*EnableRouteResponse, error)
@@ -419,6 +431,9 @@ func (UnimplementedHeadscaleServiceServer) ListNodes(context.Context, *ListNodes
func (UnimplementedHeadscaleServiceServer) MoveNode(context.Context, *MoveNodeRequest) (*MoveNodeResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method MoveNode not implemented")
}
func (UnimplementedHeadscaleServiceServer) BackfillNodeIPs(context.Context, *BackfillNodeIPsRequest) (*BackfillNodeIPsResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method BackfillNodeIPs not implemented")
}
func (UnimplementedHeadscaleServiceServer) GetRoutes(context.Context, *GetRoutesRequest) (*GetRoutesResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method GetRoutes not implemented")
}
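The UnimplementedHeadscaleServiceServer stub above keeps returning codes.Unimplemented until a concrete server overrides the method. A hypothetical server-side sketch, not headscale's actual implementation, again assuming the generated package import path and a dry-run meaning for the confirmed flag:

package main

import (
	"context"

	v1 "github.com/juanfont/headscale/gen/go/headscale/v1" // assumed import path
)

// demoServer is a hypothetical implementation; embedding the generated
// Unimplemented struct keeps it compiling as new RPCs are added.
type demoServer struct {
	v1.UnimplementedHeadscaleServiceServer
}

func (s *demoServer) BackfillNodeIPs(ctx context.Context, req *v1.BackfillNodeIPsRequest) (*v1.BackfillNodeIPsResponse, error) {
	if !req.GetConfirmed() {
		// Report what would change without applying it (assumed semantics).
		return &v1.BackfillNodeIPsResponse{Changes: []string{"dry run: no changes applied"}}, nil
	}
	// A real server would reconcile node IP allocations here and list them.
	return &v1.BackfillNodeIPsResponse{Changes: []string{"node 1: backfilled missing address"}}, nil
}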
@@ -765,6 +780,24 @@ func _HeadscaleService_MoveNode_Handler(srv interface{}, ctx context.Context, de
	return interceptor(ctx, in, info, handler)
}

func _HeadscaleService_BackfillNodeIPs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(BackfillNodeIPsRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(HeadscaleServiceServer).BackfillNodeIPs(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server: srv,
		FullMethod: HeadscaleService_BackfillNodeIPs_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(HeadscaleServiceServer).BackfillNodeIPs(ctx, req.(*BackfillNodeIPsRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _HeadscaleService_GetRoutes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(GetRoutesRequest)
	if err := dec(in); err != nil {
@@ -1002,6 +1035,10 @@ var HeadscaleService_ServiceDesc = grpc.ServiceDesc{
			MethodName: "MoveNode",
			Handler: _HeadscaleService_MoveNode_Handler,
		},
		{
			MethodName: "BackfillNodeIPs",
			Handler: _HeadscaleService_BackfillNodeIPs_Handler,
		},
		{
			MethodName: "GetRoutes",
			Handler: _HeadscaleService_GetRoutes_Handler,
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.32.0
// protoc-gen-go v1.33.0
// protoc (unknown)
// source: headscale/v1/node.proto
@ -1141,6 +1141,100 @@ func (x *DebugCreateNodeResponse) GetNode() *Node {
|
|||
return nil
|
||||
}
|
||||
|
||||
type BackfillNodeIPsRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Confirmed bool `protobuf:"varint,1,opt,name=confirmed,proto3" json:"confirmed,omitempty"`
|
||||
}
|
||||
|
||||
func (x *BackfillNodeIPsRequest) Reset() {
|
||||
*x = BackfillNodeIPsRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_node_proto_msgTypes[19]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *BackfillNodeIPsRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*BackfillNodeIPsRequest) ProtoMessage() {}
|
||||
|
||||
func (x *BackfillNodeIPsRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_node_proto_msgTypes[19]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use BackfillNodeIPsRequest.ProtoReflect.Descriptor instead.
|
||||
func (*BackfillNodeIPsRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_node_proto_rawDescGZIP(), []int{19}
|
||||
}
|
||||
|
||||
func (x *BackfillNodeIPsRequest) GetConfirmed() bool {
|
||||
if x != nil {
|
||||
return x.Confirmed
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type BackfillNodeIPsResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Changes []string `protobuf:"bytes,1,rep,name=changes,proto3" json:"changes,omitempty"`
|
||||
}
|
||||
|
||||
func (x *BackfillNodeIPsResponse) Reset() {
|
||||
*x = BackfillNodeIPsResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_node_proto_msgTypes[20]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *BackfillNodeIPsResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*BackfillNodeIPsResponse) ProtoMessage() {}
|
||||
|
||||
func (x *BackfillNodeIPsResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_node_proto_msgTypes[20]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use BackfillNodeIPsResponse.ProtoReflect.Descriptor instead.
|
||||
func (*BackfillNodeIPsResponse) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_node_proto_rawDescGZIP(), []int{20}
|
||||
}
|
||||
|
||||
func (x *BackfillNodeIPsResponse) GetChanges() []string {
|
||||
if x != nil {
|
||||
return x.Changes
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var File_headscale_v1_node_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_headscale_v1_node_proto_rawDesc = []byte{
|
||||
|
@@ -1260,18 +1354,25 @@ var file_headscale_v1_node_proto_rawDesc = []byte{
    0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
    0x12, 0x26, 0x0a, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12,
    0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x6f,
    0x64, 0x65, 0x52, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x2a, 0x82, 0x01, 0x0a, 0x0e, 0x52, 0x65, 0x67,
    0x69, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x1f, 0x0a, 0x1b, 0x52,
    0x45, 0x47, 0x49, 0x53, 0x54, 0x45, 0x52, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f, 0x55,
    0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1c, 0x0a, 0x18,
    0x52, 0x45, 0x47, 0x49, 0x53, 0x54, 0x45, 0x52, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f,
    0x41, 0x55, 0x54, 0x48, 0x5f, 0x4b, 0x45, 0x59, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x52, 0x45,
    0x47, 0x49, 0x53, 0x54, 0x45, 0x52, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f, 0x43, 0x4c,
    0x49, 0x10, 0x02, 0x12, 0x18, 0x0a, 0x14, 0x52, 0x45, 0x47, 0x49, 0x53, 0x54, 0x45, 0x52, 0x5f,
    0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f, 0x4f, 0x49, 0x44, 0x43, 0x10, 0x03, 0x42, 0x29, 0x5a,
    0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6a, 0x75, 0x61, 0x6e,
    0x66, 0x6f, 0x6e, 0x74, 0x2f, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x67,
    0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
    0x64, 0x65, 0x52, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x22, 0x36, 0x0a, 0x16, 0x42, 0x61, 0x63, 0x6b,
    0x66, 0x69, 0x6c, 0x6c, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x50, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
    0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x72, 0x6d, 0x65, 0x64, 0x18,
    0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x72, 0x6d, 0x65, 0x64,
    0x22, 0x33, 0x0a, 0x17, 0x42, 0x61, 0x63, 0x6b, 0x66, 0x69, 0x6c, 0x6c, 0x4e, 0x6f, 0x64, 0x65,
    0x49, 0x50, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x63,
    0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x63, 0x68,
    0x61, 0x6e, 0x67, 0x65, 0x73, 0x2a, 0x82, 0x01, 0x0a, 0x0e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74,
    0x65, 0x72, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x1f, 0x0a, 0x1b, 0x52, 0x45, 0x47, 0x49,
    0x53, 0x54, 0x45, 0x52, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50,
    0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1c, 0x0a, 0x18, 0x52, 0x45, 0x47,
    0x49, 0x53, 0x54, 0x45, 0x52, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f, 0x41, 0x55, 0x54,
    0x48, 0x5f, 0x4b, 0x45, 0x59, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x52, 0x45, 0x47, 0x49, 0x53,
    0x54, 0x45, 0x52, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f, 0x43, 0x4c, 0x49, 0x10, 0x02,
    0x12, 0x18, 0x0a, 0x14, 0x52, 0x45, 0x47, 0x49, 0x53, 0x54, 0x45, 0x52, 0x5f, 0x4d, 0x45, 0x54,
    0x48, 0x4f, 0x44, 0x5f, 0x4f, 0x49, 0x44, 0x43, 0x10, 0x03, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69,
    0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6a, 0x75, 0x61, 0x6e, 0x66, 0x6f, 0x6e,
    0x74, 0x2f, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f,
    0x67, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}

var (
@@ -1287,7 +1388,7 @@ func file_headscale_v1_node_proto_rawDescGZIP() []byte {
}

var file_headscale_v1_node_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_headscale_v1_node_proto_msgTypes = make([]protoimpl.MessageInfo, 19)
var file_headscale_v1_node_proto_msgTypes = make([]protoimpl.MessageInfo, 21)
var file_headscale_v1_node_proto_goTypes = []interface{}{
    (RegisterMethod)(0), // 0: headscale.v1.RegisterMethod
    (*Node)(nil),        // 1: headscale.v1.Node
@@ -1309,16 +1410,18 @@ var file_headscale_v1_node_proto_goTypes = []interface{}{
    (*MoveNodeResponse)(nil),        // 17: headscale.v1.MoveNodeResponse
    (*DebugCreateNodeRequest)(nil),  // 18: headscale.v1.DebugCreateNodeRequest
    (*DebugCreateNodeResponse)(nil), // 19: headscale.v1.DebugCreateNodeResponse
    (*User)(nil),                    // 20: headscale.v1.User
    (*timestamppb.Timestamp)(nil),   // 21: google.protobuf.Timestamp
    (*PreAuthKey)(nil),              // 22: headscale.v1.PreAuthKey
    (*BackfillNodeIPsRequest)(nil),  // 20: headscale.v1.BackfillNodeIPsRequest
    (*BackfillNodeIPsResponse)(nil), // 21: headscale.v1.BackfillNodeIPsResponse
    (*User)(nil),                    // 22: headscale.v1.User
    (*timestamppb.Timestamp)(nil),   // 23: google.protobuf.Timestamp
    (*PreAuthKey)(nil),              // 24: headscale.v1.PreAuthKey
}
var file_headscale_v1_node_proto_depIdxs = []int32{
    20, // 0: headscale.v1.Node.user:type_name -> headscale.v1.User
    21, // 1: headscale.v1.Node.last_seen:type_name -> google.protobuf.Timestamp
    21, // 2: headscale.v1.Node.expiry:type_name -> google.protobuf.Timestamp
    22, // 3: headscale.v1.Node.pre_auth_key:type_name -> headscale.v1.PreAuthKey
    21, // 4: headscale.v1.Node.created_at:type_name -> google.protobuf.Timestamp
    22, // 0: headscale.v1.Node.user:type_name -> headscale.v1.User
    23, // 1: headscale.v1.Node.last_seen:type_name -> google.protobuf.Timestamp
    23, // 2: headscale.v1.Node.expiry:type_name -> google.protobuf.Timestamp
    24, // 3: headscale.v1.Node.pre_auth_key:type_name -> headscale.v1.PreAuthKey
    23, // 4: headscale.v1.Node.created_at:type_name -> google.protobuf.Timestamp
    0, // 5: headscale.v1.Node.register_method:type_name -> headscale.v1.RegisterMethod
    1, // 6: headscale.v1.RegisterNodeResponse.node:type_name -> headscale.v1.Node
    1, // 7: headscale.v1.GetNodeResponse.node:type_name -> headscale.v1.Node
@@ -1571,6 +1674,30 @@ func file_headscale_v1_node_proto_init() {
                return nil
            }
        }
        file_headscale_v1_node_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
            switch v := v.(*BackfillNodeIPsRequest); i {
            case 0:
                return &v.state
            case 1:
                return &v.sizeCache
            case 2:
                return &v.unknownFields
            default:
                return nil
            }
        }
        file_headscale_v1_node_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
            switch v := v.(*BackfillNodeIPsResponse); i {
            case 0:
                return &v.state
            case 1:
                return &v.sizeCache
            case 2:
                return &v.unknownFields
            default:
                return nil
            }
        }
    }
    type x struct{}
    out := protoimpl.TypeBuilder{
@@ -1578,7 +1705,7 @@ func file_headscale_v1_node_proto_init() {
            GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
            RawDescriptor: file_headscale_v1_node_proto_rawDesc,
            NumEnums:      1,
            NumMessages:   19,
            NumMessages:   21,
            NumExtensions: 0,
            NumServices:   0,
        },

@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.32.0
// protoc-gen-go v1.33.0
// protoc (unknown)
// source: headscale/v1/preauthkey.proto

@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.32.0
// protoc-gen-go v1.33.0
// protoc (unknown)
// source: headscale/v1/routes.proto

@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.32.0
// protoc-gen-go v1.33.0
// protoc (unknown)
// source: headscale/v1/user.proto

@@ -194,6 +194,36 @@
          ]
        }
      },
      "/api/v1/node/backfillips": {
        "post": {
          "operationId": "HeadscaleService_BackfillNodeIPs",
          "responses": {
            "200": {
              "description": "A successful response.",
              "schema": {
                "$ref": "#/definitions/v1BackfillNodeIPsResponse"
              }
            },
            "default": {
              "description": "An unexpected error response.",
              "schema": {
                "$ref": "#/definitions/rpcStatus"
              }
            }
          },
          "parameters": [
            {
              "name": "confirmed",
              "in": "query",
              "required": false,
              "type": "boolean"
            }
          ],
          "tags": [
            "HeadscaleService"
          ]
        }
      },
      "/api/v1/node/register": {
        "post": {
          "operationId": "HeadscaleService_RegisterNode",

@@ -886,6 +916,17 @@
        }
      }
    },
    "v1BackfillNodeIPsResponse": {
      "type": "object",
      "properties": {
        "changes": {
          "type": "array",
          "items": {
            "type": "string"
          }
        }
      }
    },
    "v1CreateApiKeyRequest": {
      "type": "object",
      "properties": {

118 go.mod
@@ -1,54 +1,55 @@
module github.com/juanfont/headscale

go 1.22
go 1.22.0

toolchain go1.22.0
toolchain go1.22.2

require (
    github.com/AlecAivazis/survey/v2 v2.3.7
    github.com/coreos/go-oidc/v3 v3.9.0
    github.com/coreos/go-oidc/v3 v3.10.0
    github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
    github.com/deckarep/golang-set/v2 v2.6.0
    github.com/efekarakus/termcolor v1.0.1
    github.com/glebarez/sqlite v1.10.0
    github.com/go-gormigrate/gormigrate/v2 v2.1.1
    github.com/gofrs/uuid/v5 v5.0.0
    github.com/glebarez/sqlite v1.11.0
    github.com/go-gormigrate/gormigrate/v2 v2.1.2
    github.com/gofrs/uuid/v5 v5.2.0
    github.com/google/go-cmp v0.6.0
    github.com/gorilla/mux v1.8.1
    github.com/grpc-ecosystem/go-grpc-middleware v1.4.0
    github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1
    github.com/klauspost/compress v1.17.6
    github.com/oauth2-proxy/mockoidc v0.0.0-20220308204021-b9169deeb282
    github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0
    github.com/jagottsicher/termcolor v1.0.2
    github.com/klauspost/compress v1.17.8
    github.com/oauth2-proxy/mockoidc v0.0.0-20240214162133-caebfff84d25
    github.com/ory/dockertest/v3 v3.10.0
    github.com/patrickmn/go-cache v2.1.0+incompatible
    github.com/philip-bui/grpc-zerolog v1.0.1
    github.com/pkg/profile v1.7.0
    github.com/prometheus/client_golang v1.18.0
    github.com/prometheus/common v0.46.0
    github.com/pterm/pterm v0.12.78
    github.com/puzpuzpuz/xsync/v3 v3.0.2
    github.com/pterm/pterm v0.12.79
    github.com/puzpuzpuz/xsync/v3 v3.1.0
    github.com/rs/zerolog v1.32.0
    github.com/samber/lo v1.39.0
    github.com/sasha-s/go-deadlock v0.3.1
    github.com/spf13/cobra v1.8.0
    github.com/spf13/viper v1.18.2
    github.com/stretchr/testify v1.8.4
    github.com/stretchr/testify v1.9.0
    github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a
    github.com/tailscale/tailsql v0.0.0-20231216172832-51483e0c711b
    github.com/tailscale/tailsql v0.0.0-20240418235827-820559f382c1
    github.com/tcnksm/go-latest v0.0.0-20170313132115-e3007ae9052e
    go4.org/netipx v0.0.0-20231129151722-fdeea329fbba
    golang.org/x/crypto v0.21.0
    golang.org/x/exp v0.0.0-20240205201215-2c58cdc269a3
    golang.org/x/net v0.22.0
    golang.org/x/oauth2 v0.17.0
    golang.org/x/sync v0.6.0
    google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014
    google.golang.org/grpc v1.61.0
    google.golang.org/protobuf v1.32.0
    golang.org/x/crypto v0.23.0
    golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842
    golang.org/x/net v0.25.0
    golang.org/x/oauth2 v0.20.0
    golang.org/x/sync v0.7.0
    google.golang.org/genproto/googleapis/api v0.0.0-20240515191416-fc5f0ca64291
    google.golang.org/grpc v1.64.0
    google.golang.org/protobuf v1.34.1
    gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
    gopkg.in/yaml.v3 v3.0.1
    gorm.io/driver/postgres v1.5.4
    gorm.io/gorm v1.25.5
    tailscale.com v1.58.2
    gorm.io/driver/postgres v1.5.7
    gorm.io/gorm v1.25.10
    tailscale.com v1.66.3
)

require (
@ -58,7 +59,7 @@ require (
|
|||
dario.cat/mergo v1.0.0 // indirect
|
||||
filippo.io/edwards25519 v1.1.0 // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.1 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.2 // indirect
|
||||
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect
|
||||
github.com/akutz/memconn v0.1.0 // indirect
|
||||
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa // indirect
|
||||
|
@ -77,35 +78,39 @@ require (
|
|||
github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 // indirect
|
||||
github.com/aws/smithy-go v1.19.0 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.2.1 // indirect
|
||||
github.com/bits-and-blooms/bitset v1.13.0 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
github.com/containerd/console v1.0.4 // indirect
|
||||
github.com/containerd/continuity v0.4.3 // indirect
|
||||
github.com/coreos/go-iptables v0.7.0 // indirect
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
|
||||
github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 // indirect
|
||||
github.com/creachadair/mds v0.14.5 // indirect
|
||||
github.com/dblohm7/wingoes v0.0.0-20240123200102-b75a8a7d7eb0 // indirect
|
||||
github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e // indirect
|
||||
github.com/docker/cli v25.0.3+incompatible // indirect
|
||||
github.com/docker/docker v25.0.3+incompatible // indirect
|
||||
github.com/docker/cli v26.1.3+incompatible // indirect
|
||||
github.com/docker/docker v26.1.3+incompatible // indirect
|
||||
github.com/docker/go-connections v0.5.0 // indirect
|
||||
github.com/docker/go-units v0.5.0 // indirect
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/felixge/fgprof v0.9.3 // indirect
|
||||
github.com/felixge/fgprof v0.9.4 // indirect
|
||||
github.com/fsnotify/fsnotify v1.7.0 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.5.0 // indirect
|
||||
github.com/gaissmai/bart v0.4.1 // indirect
|
||||
github.com/glebarez/go-sqlite v1.22.0 // indirect
|
||||
github.com/go-jose/go-jose/v3 v3.0.1 // indirect
|
||||
github.com/go-jose/go-jose/v3 v3.0.3 // indirect
|
||||
github.com/go-jose/go-jose/v4 v4.0.1 // indirect
|
||||
github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0 // indirect
|
||||
github.com/go-ole/go-ole v1.3.0 // indirect
|
||||
github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang-jwt/jwt v3.2.2+incompatible // indirect
|
||||
github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.3 // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/google/btree v1.1.2 // indirect
|
||||
github.com/google/go-github v17.0.0+incompatible // indirect
|
||||
github.com/google/go-querystring v1.1.0 // indirect
|
||||
github.com/google/nftables v0.1.1-0.20230115205135-9aa6fdf5a28c // indirect
|
||||
github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 // indirect
|
||||
github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806 // indirect
|
||||
github.com/google/pprof v0.0.0-20240509144519-723abb6459b7 // indirect
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/gookit/color v1.5.4 // indirect
|
||||
|
@ -119,7 +124,7 @@ require (
|
|||
github.com/insomniacslk/dhcp v0.0.0-20240129002554-15c9b8791914 // indirect
|
||||
github.com/jackc/pgpassfile v1.0.0 // indirect
|
||||
github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9 // indirect
|
||||
github.com/jackc/pgx/v5 v5.5.3 // indirect
|
||||
github.com/jackc/pgx/v5 v5.5.5 // indirect
|
||||
github.com/jackc/puddle/v2 v2.2.1 // indirect
|
||||
github.com/jinzhu/inflection v1.0.0 // indirect
|
||||
github.com/jinzhu/now v1.1.5 // indirect
|
||||
|
@ -144,12 +149,14 @@ require (
|
|||
github.com/miekg/dns v1.1.58 // indirect
|
||||
github.com/mitchellh/go-ps v1.0.0 // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||
github.com/moby/docker-image-spec v1.3.1 // indirect
|
||||
github.com/moby/term v0.5.0 // indirect
|
||||
github.com/ncruces/go-strftime v0.1.9 // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.1.0-rc6 // indirect
|
||||
github.com/opencontainers/image-spec v1.1.0 // indirect
|
||||
github.com/opencontainers/runc v1.1.12 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.1.1 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.2.2 // indirect
|
||||
github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 // indirect
|
||||
github.com/pierrec/lz4/v4 v4.1.21 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
|
@ -166,16 +173,17 @@ require (
|
|||
github.com/spf13/afero v1.11.0 // indirect
|
||||
github.com/spf13/cast v1.6.0 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/stretchr/objx v0.5.1 // indirect
|
||||
github.com/subosito/gotenv v1.6.0 // indirect
|
||||
github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e // indirect
|
||||
github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55 // indirect
|
||||
github.com/tailscale/golang-x-crypto v0.0.0-20240108194725-7ce1f622c780 // indirect
|
||||
github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 // indirect
|
||||
github.com/tailscale/netlink v1.1.1-0.20211101221916-cabfb018fe85 // indirect
|
||||
github.com/tailscale/setec v0.0.0-20240102233422-ba738f8ab5a0 // indirect
|
||||
github.com/tailscale/web-client-prebuilt v0.0.0-20240111230031-5ca22df9e6e7 // indirect
|
||||
github.com/tailscale/wireguard-go v0.0.0-20231121184858-cc193a0b3272 // indirect
|
||||
github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4 // indirect
|
||||
github.com/tailscale/setec v0.0.0-20240314234648-9da8e7407257 // indirect
|
||||
github.com/tailscale/squibble v0.0.0-20240418235321-9ee0eeb78185 // indirect
|
||||
github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1 // indirect
|
||||
github.com/tailscale/wireguard-go v0.0.0-20240429185444-03c5a0ccf754 // indirect
|
||||
github.com/tcnksm/go-httpstat v0.2.0 // indirect
|
||||
github.com/u-root/uio v0.0.0-20240118234441-a3c409a6018e // indirect
|
||||
github.com/vishvananda/netlink v1.2.1-beta.2 // indirect
|
||||
|
@ -187,25 +195,21 @@ require (
|
|||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
go4.org/mem v0.0.0-20220726221520-4f986261bf13 // indirect
|
||||
golang.org/x/mod v0.16.0 // indirect
|
||||
golang.org/x/sys v0.19.0 // indirect
|
||||
golang.org/x/term v0.18.0 // indirect
|
||||
golang.org/x/text v0.14.0 // indirect
|
||||
golang.org/x/mod v0.17.0 // indirect
|
||||
golang.org/x/sys v0.20.0 // indirect
|
||||
golang.org/x/term v0.20.0 // indirect
|
||||
golang.org/x/text v0.15.0 // indirect
|
||||
golang.org/x/time v0.5.0 // indirect
|
||||
golang.org/x/tools v0.19.0 // indirect
|
||||
golang.org/x/tools v0.21.0 // indirect
|
||||
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 // indirect
|
||||
golang.zx2c4.com/wireguard/windows v0.5.3 // indirect
|
||||
google.golang.org/appengine v1.6.8 // indirect
|
||||
google.golang.org/genproto v0.0.0-20240205150955-31a09d347014 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240205150955-31a09d347014 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240515191416-fc5f0ca64291 // indirect
|
||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||
gopkg.in/square/go-jose.v2 v2.6.0 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gvisor.dev/gvisor v0.0.0-20230928000133-4fe30062272c // indirect
|
||||
inet.af/peercred v0.0.0-20210906144145-0893ea02156a // indirect
|
||||
modernc.org/libc v1.49.3 // indirect
|
||||
gvisor.dev/gvisor v0.0.0-20240306221502-ee1e1f6070e3 // indirect
|
||||
modernc.org/libc v1.50.6 // indirect
|
||||
modernc.org/mathutil v1.6.0 // indirect
|
||||
modernc.org/memory v1.8.0 // indirect
|
||||
modernc.org/sqlite v1.28.0 // indirect
|
||||
modernc.org/sqlite v1.29.9 // indirect
|
||||
nhooyr.io/websocket v1.8.10 // indirect
|
||||
)
|
||||
|
|
326 go.sum
@ -29,8 +29,8 @@ github.com/MarvinJWendt/testza v0.3.0/go.mod h1:eFcL4I0idjtIx8P9C6KkAuLgATNKpX4/
|
|||
github.com/MarvinJWendt/testza v0.4.2/go.mod h1:mSdhXiKH8sg/gQehJ63bINcCKp7RtYewEjXsvsVUPbE=
|
||||
github.com/MarvinJWendt/testza v0.5.2 h1:53KDo64C1z/h/d/stCYCPY69bt/OSwjq5KpFNwi+zB4=
|
||||
github.com/MarvinJWendt/testza v0.5.2/go.mod h1:xu53QFE5sCdjtMCKk8YMQ2MnymimEctc4n3EjyIYvEY=
|
||||
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
|
||||
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
|
||||
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
|
||||
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
|
||||
github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 h1:+vx7roKuyA63nhn5WAunQHLTznkw5W8b1Xc0dNjp83s=
|
||||
github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2/go.mod h1:HBCaDeC1lPdgDeDbhX8XFpy1jqjK0IBG8W5K+xYqA0w=
|
||||
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=
|
||||
|
@ -83,14 +83,22 @@ github.com/aws/smithy-go v1.19.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6
|
|||
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
|
||||
github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
|
||||
github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE=
|
||||
github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
|
||||
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs=
|
||||
github.com/chromedp/chromedp v0.9.2/go.mod h1:LkSXJKONWTCHAfQasKFUZI+mxqS4tZqhmtGzzhLsnLs=
|
||||
github.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moAV0xufSww=
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
|
||||
github.com/cilium/ebpf v0.12.3 h1:8ht6F9MquybnY97at+VDZb3eQQr8ev79RueWeVaEcG4=
|
||||
github.com/cilium/ebpf v0.12.3/go.mod h1:TctK1ivibvI3znr66ljgi4hqOT8EYQjz1KWBfb1UVgM=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
|
@ -100,17 +108,18 @@ github.com/containerd/console v1.0.4 h1:F2g4+oChYvBTsASRTz8NP6iIAi97J3TtSAsLbIFn
|
|||
github.com/containerd/console v1.0.4/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk=
|
||||
github.com/containerd/continuity v0.4.3 h1:6HVkalIp+2u1ZLH1J/pYX2oBVXlJZvh1X1A7bEZ9Su8=
|
||||
github.com/containerd/continuity v0.4.3/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ=
|
||||
github.com/coreos/go-iptables v0.7.0 h1:XWM3V+MPRr5/q51NuWSgU0fqMad64Zyxs8ZUoMsamr8=
|
||||
github.com/coreos/go-iptables v0.7.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q=
|
||||
github.com/coreos/go-oidc/v3 v3.9.0 h1:0J/ogVOd4y8P0f0xUh8l9t07xRP/d8tccvjHl2dcsSo=
|
||||
github.com/coreos/go-oidc/v3 v3.9.0/go.mod h1:rTKz2PYwftcrtoCzV5g5kvfJoWcm0Mk8AF8y1iAQro4=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
|
||||
github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 h1:8h5+bWd7R6AYUslN6c6iuZWTKsKxUFDlpnmilO6R2n0=
|
||||
github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q=
|
||||
github.com/coreos/go-oidc/v3 v3.10.0 h1:tDnXHnLyiTVyT/2zLDGj09pFPkhND8Gl8lnTRhoEaJU=
|
||||
github.com/coreos/go-oidc/v3 v3.10.0/go.mod h1:5j11xcw0D3+SGxn6Z/WFADsgcWVMyNAlSQupk0KK3ac=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/creachadair/mds v0.14.5 h1:2amuO4yCbQkaAyDoLO5iCbwbTRQZz4EpRhOejQbf4+8=
|
||||
github.com/creachadair/mds v0.14.5/go.mod h1:4vrFYUzTXMJpMBU+OA292I6IUxKWCCfZkgXg+/kBZMo=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
|
||||
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
|
||||
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
|
||||
github.com/creack/pty v1.1.21 h1:1/QdRyBaHHJP61QkWMXlOIBfsgdDeeKfK8SYVUWJKf0=
|
||||
github.com/creack/pty v1.1.21/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||
|
@ -121,40 +130,49 @@ github.com/deckarep/golang-set/v2 v2.6.0 h1:XfcQbWM1LlMB8BsJ8N9vW5ehnnPVIw0je80N
|
|||
github.com/deckarep/golang-set/v2 v2.6.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4=
|
||||
github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e h1:vUmf0yezR0y7jJ5pceLHthLaYf4bA5T14B6q39S4q2Q=
|
||||
github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e/go.mod h1:YTIHhz/QFSYnu/EhlF2SpU2Uk+32abacUYA5ZPljz1A=
|
||||
github.com/docker/cli v25.0.3+incompatible h1:KLeNs7zws74oFuVhgZQ5ONGZiXUUdgsdy6/EsX/6284=
|
||||
github.com/docker/cli v25.0.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/docker v25.0.3+incompatible h1:D5fy/lYmY7bvZa0XTZ5/UJPljor41F+vdyJG5luQLfQ=
|
||||
github.com/docker/docker v25.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c=
|
||||
github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0=
|
||||
github.com/docker/cli v26.1.3+incompatible h1:bUpXT/N0kDE3VUHI2r5VMsYQgi38kYuoC0oL9yt3lqc=
|
||||
github.com/docker/cli v26.1.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo=
|
||||
github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
|
||||
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
|
||||
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
|
||||
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/dsnet/try v0.0.3 h1:ptR59SsrcFUYbT/FhAbKTV6iLkeD6O18qfIWRml2fqI=
|
||||
github.com/dsnet/try v0.0.3/go.mod h1:WBM8tRpUmnXXhY1U6/S8dt6UWdHTQ7y8A5YSkRCkq40=
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||
github.com/efekarakus/termcolor v1.0.1 h1:YAKFO3bnLrqZGTWyNLcYoSIAQFKVOmbqmDnwsU/znzg=
|
||||
github.com/efekarakus/termcolor v1.0.1/go.mod h1:AitrZNrE4nPO538fRsqf+p0WgLdAsGN5pUNrHEPsEMM=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/felixge/fgprof v0.9.3 h1:VvyZxILNuCiUCSXtPtYmmtGvb65nqXh2QFWc0Wpf2/g=
|
||||
github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw=
|
||||
github.com/felixge/fgprof v0.9.4 h1:ocDNwMFlnA0NU0zSB3I52xkO4sFXk80VK9lXjLClu88=
|
||||
github.com/felixge/fgprof v0.9.4/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM=
|
||||
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
|
||||
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
|
||||
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
|
||||
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
|
||||
github.com/fxamacker/cbor/v2 v2.5.0 h1:oHsG0V/Q6E/wqTS2O1Cozzsy69nqCiguo5Q1a1ADivE=
|
||||
github.com/fxamacker/cbor/v2 v2.5.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo=
|
||||
github.com/gaissmai/bart v0.4.1 h1:G1t58voWkNmT47lBDawH5QhtTDsdqRIO+ftq5x4P9Ls=
|
||||
github.com/gaissmai/bart v0.4.1/go.mod h1:KHeYECXQiBjTzQz/om2tqn3sZF1J7hw9m6z41ftj3fg=
|
||||
github.com/github/fakeca v0.1.0 h1:Km/MVOFvclqxPM9dZBC4+QE564nU4gz4iZ0D9pMw28I=
|
||||
github.com/github/fakeca v0.1.0/go.mod h1:+bormgoGMMuamOscx7N91aOuUST7wdaJ2rNjeohylyo=
|
||||
github.com/glebarez/go-sqlite v1.22.0 h1:uAcMJhaA6r3LHMTFgP0SifzgXg46yJkgxqyuyec+ruQ=
|
||||
github.com/glebarez/go-sqlite v1.22.0/go.mod h1:PlBIdHe0+aUEFn+r2/uthrWq4FxbzugL0L8Li6yQJbc=
|
||||
github.com/glebarez/sqlite v1.10.0 h1:u4gt8y7OND/cCei/NMHmfbLxF6xP2wgKcT/BJf2pYkc=
|
||||
github.com/glebarez/sqlite v1.10.0/go.mod h1:IJ+lfSOmiekhQsFTJRx/lHtGYmCdtAiTaf5wI9u5uHA=
|
||||
github.com/go-gormigrate/gormigrate/v2 v2.1.1 h1:eGS0WTFRV30r103lU8JNXY27KbviRnqqIDobW3EV3iY=
|
||||
github.com/go-gormigrate/gormigrate/v2 v2.1.1/go.mod h1:L7nJ620PFDKei9QOhJzqA8kRCk+E3UbV2f5gv+1ndLc=
|
||||
github.com/go-jose/go-jose/v3 v3.0.1 h1:pWmKFVtt+Jl0vBZTIpz/eAKwsm6LkIxDVVbFHKkchhA=
|
||||
github.com/go-jose/go-jose/v3 v3.0.1/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8=
|
||||
github.com/glebarez/sqlite v1.11.0 h1:wSG0irqzP6VurnMEpFGer5Li19RpIRi2qvQz++w0GMw=
|
||||
github.com/glebarez/sqlite v1.11.0/go.mod h1:h8/o8j5wiAsqSPoWELDUdJXhjAhsVliSn7bWZjOhrgQ=
|
||||
github.com/go-gormigrate/gormigrate/v2 v2.1.2 h1:F/d1hpHbRAvKezziV2CC5KUE82cVe9zTgHSBoOOZ4CY=
|
||||
github.com/go-gormigrate/gormigrate/v2 v2.1.2/go.mod h1:9nHVX6z3FCMCQPA7PThGcA55t22yKQfK/Dnsf5i7hUo=
|
||||
github.com/go-jose/go-jose/v3 v3.0.3 h1:fFKWeig/irsp7XD2zBxvnmA/XaRWp5V3CBsZXJF7G7k=
|
||||
github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=
|
||||
github.com/go-jose/go-jose/v4 v4.0.1 h1:QVEPDE3OluqXBQZDcnNvQrInro2h0e4eqNbnZSWqS6U=
|
||||
github.com/go-jose/go-jose/v4 v4.0.1/go.mod h1:WVf9LFMHh/QVrmqrOfqun0C45tMe3RoiKJMPvgWwLfY=
|
||||
github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0 h1:ymLjT4f35nQbASLnvxEde4XOBL+Sn7rFuV+FOJqkljg=
|
||||
github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0/go.mod h1:6daplAwHHGbUGib4990V3Il26O0OC4aRyvewaaAihaA=
|
||||
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
|
||||
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
|
||||
github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
|
||||
|
@ -162,15 +180,18 @@ github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW
|
|||
github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
|
||||
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
|
||||
github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
|
||||
github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
|
||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 h1:sQspH8M4niEijh3PFscJRLDnkL547IeP7kpPe3uUhEg=
|
||||
github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466/go.mod h1:ZiQxhyQ+bbbfxUKVvjfO498oPYvtYhZzycal3G/NHmU=
|
||||
github.com/gofrs/uuid/v5 v5.0.0 h1:p544++a97kEL+svbcFbCQVM9KFu0Yo25UoISXGNNH9M=
|
||||
github.com/gofrs/uuid/v5 v5.0.0/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8=
|
||||
github.com/gofrs/uuid/v5 v5.2.0 h1:qw1GMx6/y8vhVsx626ImfKMuS5CvJmhIKKtuyvfajMM=
|
||||
github.com/gofrs/uuid/v5 v5.2.0/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY=
|
||||
github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
|
@ -178,17 +199,13 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb
|
|||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
|
||||
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=
|
||||
github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY=
|
||||
|
@ -197,11 +214,12 @@ github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD
|
|||
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
|
||||
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
||||
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/nftables v0.1.1-0.20230115205135-9aa6fdf5a28c h1:06RMfw+TMMHtRuUOroMeatRCCgSMWXCJQeABvHU69YQ=
|
||||
github.com/google/nftables v0.1.1-0.20230115205135-9aa6fdf5a28c/go.mod h1:BVIYo3cdnT4qSylnYqcd5YtmXhr51cJPGtnLBe/uLBU=
|
||||
github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806 h1:wG8RYIyctLhdFk6Vl1yPGtSRtwGpVkWyZww1OCil2MI=
|
||||
github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806/go.mod h1:Beg6V6zZ3oEn0JuiUQ4wqwuyqqzasOltcoXPtgLbFp4=
|
||||
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
|
||||
github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 h1:E/LAvt58di64hlYjx7AsNS6C/ysHWYo+2qPCZKTQhRo=
|
||||
github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
|
||||
github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
|
||||
github.com/google/pprof v0.0.0-20240509144519-723abb6459b7 h1:velgFPYr1X9TDwLIfkV7fWqsFlf7TeP11M/7kPd/dVI=
|
||||
github.com/google/pprof v0.0.0-20240509144519-723abb6459b7/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw=
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
|
@ -218,10 +236,13 @@ github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kX
|
|||
github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 h1:/c3QmbOGMGTOumP2iT/rCwB7b0QDGLKzqOmktBjT+Is=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1/go.mod h1:5SN9VR2LTsRFsrEC6FHgRbTWrTHu6tqPeKxEQv15giM=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
|
||||
github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
|
||||
github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
|
||||
github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
||||
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/hdevalence/ed25519consensus v0.2.0 h1:37ICyZqdyj0lAZ8P4D1d1id3HqbbG1N3iBb1Tb4rdcU=
|
||||
|
@ -229,6 +250,7 @@ github.com/hdevalence/ed25519consensus v0.2.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3s
|
|||
github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec h1:qv2VnGeEQHchGaZ/u7lxST/RaJw+cv273q79D81Xbog=
|
||||
github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec/go.mod h1:Q48J4R4DvxnHolD5P8pOtXigYlRuPLGl6moFx3ulM68=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
|
||||
github.com/illarion/gonotify v1.0.1 h1:F1d+0Fgbq/sDWjj/r66ekjDG+IDeecQKUFH4wNwsoio=
|
||||
github.com/illarion/gonotify v1.0.1/go.mod h1:zt5pmDofZpU1f8aqlK0+95eQhoEAn/d4G4B/FjVW4jE=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
|
@ -239,10 +261,14 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI
|
|||
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
|
||||
github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9 h1:L0QtFUgDarD7Fpv9jeVMgy/+Ec0mtnmYuImjTz6dtDA=
|
||||
github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
|
||||
github.com/jackc/pgx/v5 v5.5.3 h1:Ces6/M3wbDXYpM8JyyPD57ivTtJACFZJd885pdIaV2s=
|
||||
github.com/jackc/pgx/v5 v5.5.3/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A=
|
||||
github.com/jackc/pgx/v5 v5.5.5 h1:amBjrZVmksIdNjxGW/IiIMzxMKZFelXbUoPNb+8sjQw=
|
||||
github.com/jackc/pgx/v5 v5.5.5/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A=
|
||||
github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk=
|
||||
github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
|
||||
github.com/jagottsicher/termcolor v1.0.2 h1:fo0c51pQSuLBN1+yVX2ZE+hE+P7ULb/TY8eRowJnrsM=
|
||||
github.com/jagottsicher/termcolor v1.0.2/go.mod h1:RcH8uFwF/0wbEdQmi83rjmlJ+QOKdMSE9Rc1BEB7zFo=
|
||||
github.com/jellydator/ttlcache/v3 v3.1.0 h1:0gPFG0IHHP6xyUyXq+JaD8fwkDCqgqwohXNJBcYE71g=
|
||||
github.com/jellydator/ttlcache/v3 v3.1.0/go.mod h1:hi7MGFdMAwZna5n2tuvh63DvFLzVKySzCVW6+0gA2n4=
|
||||
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
|
||||
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
|
||||
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
|
||||
|
@ -251,6 +277,7 @@ github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9Y
|
|||
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
|
||||
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
|
||||
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
|
||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
||||
github.com/josharian/native v1.0.1-0.20221213033349-c1e37c09b531/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w=
|
||||
github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86 h1:elKwZS1OcdQ0WwEDBeqxKwb7WB62QX8bvZ/FJnVXIfk=
|
||||
github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86/go.mod h1:aFAMtuldEgx/4q7iSGazk22+IcgvtiC+HIimFO9XlS8=
|
||||
|
@ -260,13 +287,13 @@ github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNU
|
|||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.17.6 h1:60eq2E/jlfwQXtvZEeBUYADs+BwKBWURIY+Gj2eRGjI=
|
||||
github.com/klauspost/compress v1.17.6/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
|
||||
github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU=
|
||||
github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
|
||||
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/cpuid/v2 v2.0.10/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=
|
||||
github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=
|
||||
github.com/klauspost/cpuid/v2 v2.2.3 h1:sxCkb+qR91z4vsqw4vGGZlDgPz3G7gjaLyK3V8y70BU=
|
||||
github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
|
||||
github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM=
|
||||
github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a h1:+RR6SqnTkDLWyICxS1xpjCi/3dhyV+TgZwA6Ww3KncQ=
|
||||
github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a/go.mod h1:YTtCCM3ryyfiu4F7t8HQ1mxvp1UBdWM2r6Xa+nGWvDk=
|
||||
|
@ -280,17 +307,18 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
|||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs=
|
||||
github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw=
|
||||
github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||
github.com/lithammer/fuzzysearch v1.1.8 h1:/HIuJnjHuXS8bKaiTMeeDlW2/AyIWk2brx1V8LFgLN4=
|
||||
github.com/lithammer/fuzzysearch v1.1.8/go.mod h1:IdqeyBClc3FFqSzYq/MXESsS4S0FsZ5ajtkr5xPLts4=
|
||||
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
|
||||
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
|
||||
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
||||
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
|
||||
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
|
||||
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
|
@ -315,27 +343,32 @@ github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc
|
|||
github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg=
|
||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
|
||||
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
|
||||
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
|
||||
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
|
||||
github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4=
|
||||
github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
|
||||
github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646 h1:zYyBkD/k9seD2A7fsi6Oo2LfFZAehjjQMERAvZLEDnQ=
|
||||
github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646/go.mod h1:jpp1/29i3P1S/RLdc7JQKbRpFeM1dOBd8T9ki5s+AY8=
|
||||
github.com/oauth2-proxy/mockoidc v0.0.0-20220308204021-b9169deeb282 h1:TQMyrpijtkFyXpNI3rY5hsZQZw+paiH+BfAlsb81HBY=
|
||||
github.com/oauth2-proxy/mockoidc v0.0.0-20220308204021-b9169deeb282/go.mod h1:rW25Kyd08Wdn3UVn0YBsDTSvReu0jqpmJKzxITPSjks=
|
||||
github.com/oauth2-proxy/mockoidc v0.0.0-20240214162133-caebfff84d25 h1:9bCMuD3TcnjeqjPT2gSlha4asp8NvgcFRYExCaikCxk=
|
||||
github.com/oauth2-proxy/mockoidc v0.0.0-20240214162133-caebfff84d25/go.mod h1:eDjgYHYDJbPLBLsyZ6qRaugP0mX8vePOhZ5id1fdzJw=
|
||||
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
|
||||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||
github.com/opencontainers/image-spec v1.1.0-rc6 h1:XDqvyKsJEbRtATzkgItUqBA7QHk58yxX1Ov9HERHNqU=
|
||||
github.com/opencontainers/image-spec v1.1.0-rc6/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
|
||||
github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
|
||||
github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
|
||||
github.com/opencontainers/runc v1.1.12 h1:BOIssBaW1La0/qbNZHXOOa71dZfZEQOzW7dqQf3phss=
|
||||
github.com/opencontainers/runc v1.1.12/go.mod h1:S+lQwSfncpBha7XTy/5lBwWgm5+y5Ma/O44Ekby9FK8=
|
||||
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||
github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0=
|
||||
github.com/ory/dockertest/v3 v3.10.0 h1:4K3z2VMe8Woe++invjaTB7VRyQXQy5UY+loujO4aNE4=
|
||||
github.com/ory/dockertest/v3 v3.10.0/go.mod h1:nr57ZbRWMqfsdGdFNLHz5jjNdDb7VVFnzAeW1n5N1Lg=
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
|
||||
github.com/pelletier/go-toml/v2 v2.1.1 h1:LWAJwfNvjQZCFIDKWYQaM62NcYeYViCmWIwmOStowAI=
|
||||
github.com/pelletier/go-toml/v2 v2.1.1/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
|
||||
github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
|
||||
github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
|
||||
github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ=
|
||||
github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o=
|
||||
github.com/philip-bui/grpc-zerolog v1.0.1 h1:EMacvLRUd2O1K0eWod27ZP5CY1iTNkhBDLSN+Q4JEvA=
|
||||
github.com/philip-bui/grpc-zerolog v1.0.1/go.mod h1:qXbiq/2X4ZUMMshsqlWyTHOcw7ns+GZmlqZZN05ZHcQ=
|
||||
github.com/pierrec/lz4/v4 v4.1.14/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
|
||||
|
@ -368,10 +401,10 @@ github.com/pterm/pterm v0.12.31/go.mod h1:32ZAWZVXD7ZfG0s8qqHXePte42kdz8ECtRyEej
|
|||
github.com/pterm/pterm v0.12.33/go.mod h1:x+h2uL+n7CP/rel9+bImHD5lF3nM9vJj80k9ybiiTTE=
|
||||
github.com/pterm/pterm v0.12.36/go.mod h1:NjiL09hFhT/vWjQHSj1athJpx6H8cjpHXNAK5bUw8T8=
|
||||
github.com/pterm/pterm v0.12.40/go.mod h1:ffwPLwlbXxP+rxT0GsgDTzS3y3rmpAO1NMjUkGTYf8s=
|
||||
github.com/pterm/pterm v0.12.78 h1:QTWKaIAa4B32GKwqVXtu9m1DUMgWw3VRljMkMevX+b8=
|
||||
github.com/pterm/pterm v0.12.78/go.mod h1:1v/gzOF1N0FsjbgTHZ1wVycRkKiatFvJSJC4IGaQAAo=
|
||||
github.com/puzpuzpuz/xsync/v3 v3.0.2 h1:3yESHrRFYr6xzkz61LLkvNiPFXxJEAABanTQpKbAaew=
|
||||
github.com/puzpuzpuz/xsync/v3 v3.0.2/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
|
||||
github.com/pterm/pterm v0.12.79 h1:lH3yrYMhdpeqX9y5Ep1u7DejyHy7NSQg9qrBjF9dFT4=
|
||||
github.com/pterm/pterm v0.12.79/go.mod h1:1v/gzOF1N0FsjbgTHZ1wVycRkKiatFvJSJC4IGaQAAo=
|
||||
github.com/puzpuzpuz/xsync/v3 v3.1.0 h1:EewKT7/LNac5SLiEblJeUu8z5eERHrmRLnMQL2d7qX4=
|
||||
github.com/puzpuzpuz/xsync/v3 v3.1.0/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
|
@ -392,6 +425,8 @@ github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6g
|
|||
github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
|
||||
github.com/samber/lo v1.39.0 h1:4gTz1wUhNYLhFSKl6O+8peW0v2F4BCY034GRpU9WnuA=
|
||||
github.com/samber/lo v1.39.0/go.mod h1:+m/ZKRl6ClXCE2Lgf3MsQlWfh4bn1bz6CXEOxnEXnEA=
|
||||
github.com/sasha-s/go-deadlock v0.3.1 h1:sqv7fDNShgjcaxkO0JNcOAlr8B9+cV5Ey/OB71efZx0=
|
||||
github.com/sasha-s/go-deadlock v0.3.1/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM=
|
||||
github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
|
||||
github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
|
||||
github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I=
|
||||
|
@ -414,8 +449,8 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
|
|||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/objx v0.5.1 h1:4VhoImhV/Bm0ToFkXFi8hXNXwpDRZ/ynw3amt82mzq0=
|
||||
github.com/stretchr/objx v0.5.1/go.mod h1:/iHQpkQwBD6DLUmQ4pE+s1TXdob1mORJ4/UFdrifcy0=
|
||||
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
|
@ -423,9 +458,9 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
|
|||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
|
||||
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
|
||||
github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e h1:PtWT87weP5LWHEY//SWsYkSO3RWRZo4OSWagh3YD2vQ=
|
||||
|
@ -440,14 +475,22 @@ github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a h1:SJy1Pu0eH1C29X
|
|||
github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a/go.mod h1:DFSS3NAGHthKo1gTlmEcSBiZrRJXi28rLNd/1udP1c8=
|
||||
github.com/tailscale/netlink v1.1.1-0.20211101221916-cabfb018fe85 h1:zrsUcqrG2uQSPhaUPjUQwozcRdDdSxxqhNgNZ3drZFk=
|
||||
github.com/tailscale/netlink v1.1.1-0.20211101221916-cabfb018fe85/go.mod h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0=
|
||||
github.com/tailscale/setec v0.0.0-20240102233422-ba738f8ab5a0 h1:0bcWsoeSBbY3XWRS1F8yp/g343E5TQMakwy5cxJS+ZU=
|
||||
github.com/tailscale/setec v0.0.0-20240102233422-ba738f8ab5a0/go.mod h1:/8aqnX9aU8yubwQ2InR5mHi1OlfWQ8ei8Ea2eyLScOY=
|
||||
github.com/tailscale/tailsql v0.0.0-20231216172832-51483e0c711b h1:FzqUT8XFn3OJTzTMteYMZlg3EUQMxoq7oJiaVj4SEBA=
|
||||
github.com/tailscale/tailsql v0.0.0-20231216172832-51483e0c711b/go.mod h1:Nkao4BDbQqzxxg78ty4ejq+KgX/0Bxj00DxfxScuJoI=
|
||||
github.com/tailscale/web-client-prebuilt v0.0.0-20240111230031-5ca22df9e6e7 h1:xAgOVncJuuxkFZ2oXXDKFTH4HDdFYSZRYdA6oMrCewg=
|
||||
github.com/tailscale/web-client-prebuilt v0.0.0-20240111230031-5ca22df9e6e7/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ=
|
||||
github.com/tailscale/wireguard-go v0.0.0-20231121184858-cc193a0b3272 h1:zwsem4CaamMdC3tFoTpzrsUSMDPV0K6rhnQdF7kXekQ=
|
||||
github.com/tailscale/wireguard-go v0.0.0-20231121184858-cc193a0b3272/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4=
|
||||
github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4 h1:Gz0rz40FvFVLTBk/K8UNAenb36EbDSnh+q7Z9ldcC8w=
|
||||
github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4/go.mod h1:phI29ccmHQBc+wvroosENp1IF9195449VDnFDhJ4rJU=
|
||||
github.com/tailscale/setec v0.0.0-20240314234648-9da8e7407257 h1:6WsbDYsikRNmmbfZoRoyIEA9tfl0aspPAE0t7nBj2B4=
|
||||
github.com/tailscale/setec v0.0.0-20240314234648-9da8e7407257/go.mod h1:hrq01/0LUDZf4mMkcZ7Ovmy33jvCi4RpESpb9kPxV6E=
|
||||
github.com/tailscale/squibble v0.0.0-20240418235321-9ee0eeb78185 h1:zT+qB+2Ghulj50d5Wq6h6vQYqD2sPdhy4FF6+FHedVE=
|
||||
github.com/tailscale/squibble v0.0.0-20240418235321-9ee0eeb78185/go.mod h1:LoIjI6z/6efr9ebISQ5l2vjQmjc8QJrAYZdy3Ec3sVs=
|
||||
github.com/tailscale/tailsql v0.0.0-20240418235827-820559f382c1 h1:wmsnxEEuRlgK7Bhdkmm0JGrjjc0JoHZThLLo0WXXbLs=
|
||||
github.com/tailscale/tailsql v0.0.0-20240418235827-820559f382c1/go.mod h1:XN193fbz9RR/5stlWPMMIZR+TTa1BUkDJm5Azwzxwgw=
|
||||
github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1 h1:tdUdyPqJ0C97SJfjB9tW6EylTtreyee9C44de+UBG0g=
|
||||
github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ=
|
||||
github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 h1:l10Gi6w9jxvinoiq15g8OToDdASBni4CyJOdHY1Hr8M=
|
||||
github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6/go.mod h1:ZXRML051h7o4OcI0d3AaILDIad/Xw0IkXaHM17dic1Y=
|
||||
github.com/tailscale/wireguard-go v0.0.0-20240429185444-03c5a0ccf754 h1:iazWjqVHE6CbNam7WXRhi33Qad5o7a8LVYgVoILpZdI=
|
||||
github.com/tailscale/wireguard-go v0.0.0-20240429185444-03c5a0ccf754/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4=
|
||||
github.com/tailscale/xnet v0.0.0-20240117122442-62b9a7c569f9 h1:81P7rjnikHKTJ75EkjppvbwUfKHDHYk6LJpO5PZy8pA=
|
||||
github.com/tailscale/xnet v0.0.0-20240117122442-62b9a7c569f9/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg=
|
||||
github.com/tc-hib/winres v0.2.1 h1:YDE0FiP0VmtRaDn7+aaChp1KiF4owBiJa5l964l5ujA=
|
||||
github.com/tc-hib/winres v0.2.1/go.mod h1:C/JaNhH3KBvhNKVbvdlDWkbMDO9H4fKKDaN7/07SSuk=
|
||||
github.com/tcnksm/go-httpstat v0.2.0 h1:rP7T5e5U2HfmOBmZzGgGZjBQ5/GluWUylujl0tJ04I0=
|
||||
|
@ -456,8 +499,8 @@ github.com/tcnksm/go-latest v0.0.0-20170313132115-e3007ae9052e h1:IWllFTiDjjLIf2
|
|||
github.com/tcnksm/go-latest v0.0.0-20170313132115-e3007ae9052e/go.mod h1:d7u6HkTYKSv5m6MCKkOQlHwaShTMl3HjqSGW3XtVhXM=
|
||||
github.com/tink-crypto/tink-go/v2 v2.1.0 h1:QXFBguwMwTIaU17EgZpEJWsUSc60b1BAGTzBIoMdmok=
|
||||
github.com/tink-crypto/tink-go/v2 v2.1.0/go.mod h1:y1TnYFt1i2eZVfx4OGc+C+EMp4CoKWAw2VSEuoicHHI=
|
||||
github.com/u-root/u-root v0.11.0 h1:6gCZLOeRyevw7gbTwMj3fKxnr9+yHFlgF3N7udUVNO8=
|
||||
github.com/u-root/u-root v0.11.0/go.mod h1:DBkDtiZyONk9hzVEdB/PWI9B4TxDkElWlVTHseglrZY=
|
||||
github.com/u-root/u-root v0.12.0 h1:K0AuBFriwr0w/PGS3HawiAw89e3+MU7ks80GpghAsNs=
|
||||
github.com/u-root/u-root v0.12.0/go.mod h1:FYjTOh4IkIZHhjsd17lb8nYW6udgXdJhG1c0r6u0arI=
|
||||
github.com/u-root/uio v0.0.0-20240118234441-a3c409a6018e h1:BA9O3BmlTmpjbvajAwzWx4Wo2TRVdpPXZEeemGQcajw=
|
||||
github.com/u-root/uio v0.0.0-20240118234441-a3c409a6018e/go.mod h1:eLL9Nub3yfAho7qB0MzZizFhTU2QkLeoVsWdHtDW264=
|
||||
github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs=
|
||||
|
@ -491,20 +534,19 @@ go4.org/mem v0.0.0-20220726221520-4f986261bf13/go.mod h1:reUoABIJ9ikfM5sgtSF3Wus
|
|||
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M=
|
||||
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba/go.mod h1:PLyyIXexvUFg3Owu6p/WfdlivPbZJsZdgWZlrGope/Y=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
|
||||
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
|
||||
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
|
||||
golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
|
||||
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20240205201215-2c58cdc269a3 h1:/RIbNt/Zr7rVhIkQhooTxCxFcdWLGIKnZA4IXNFSrvo=
|
||||
golang.org/x/exp v0.0.0-20240205201215-2c58cdc269a3/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
|
||||
golang.org/x/exp/typeparams v0.0.0-20230905200255-921286631fa9 h1:j3D9DvWRpUfIyFfDPws7LoIZ2MAI1OJHdQXtTnYtN+k=
|
||||
golang.org/x/exp/typeparams v0.0.0-20230905200255-921286631fa9/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
|
||||
golang.org/x/image v0.12.0 h1:w13vZbU4o5rKOFFR8y7M+c4A5jXDC0uXTdHYRP8X2DQ=
|
||||
golang.org/x/image v0.12.0/go.mod h1:Lu90jvHG7GfemOIcldsh9A2hS01ocl6oNO7ype5mEnk=
|
||||
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM=
|
||||
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc=
|
||||
golang.org/x/exp/typeparams v0.0.0-20240119083558-1b970713d09a h1:8qmSSA8Gz/1kTrCe0nqR0R3Gb/NDhykzWw2q2mWZydM=
|
||||
golang.org/x/exp/typeparams v0.0.0-20240119083558-1b970713d09a/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
|
||||
golang.org/x/image v0.15.0 h1:kOELfmgrmJlw4Cdb7g/QGuB3CvDrXbqEIww/pNtNBm8=
|
||||
golang.org/x/image v0.15.0/go.mod h1:HUYqC05R2ZcZ3ejNQsIHQDQiwWM4JBqmm6MKANTp4LE=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
|
@ -513,8 +555,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
|||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic=
|
||||
golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
|
||||
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
|
@ -524,14 +566,14 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL
|
|||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc=
|
||||
golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
|
||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
|
||||
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ=
|
||||
golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA=
|
||||
golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo=
|
||||
golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
|
@ -540,28 +582,25 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
|
|||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
|
||||
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
|
||||
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191113165036-4c7a9d0fe056/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210301091718-77cc2087c03b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220622161953-175b2fd9d664/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
|
@ -573,26 +612,29 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|||
golang.org/x/sys v0.4.1-0.20230131160137-e7d7f63158de/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o=
|
||||
golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
|
||||
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
|
||||
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
|
||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
|
||||
golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw=
|
||||
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
|
||||
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
|
||||
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
|
||||
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
|
@ -606,8 +648,8 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY
|
|||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/tools v0.19.0 h1:tfGCXNR1OsFG+sVdLAitlpjAvD/I6dHDKnYrpEZUHkw=
|
||||
golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc=
|
||||
golang.org/x/tools v0.21.0 h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw=
|
||||
golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
|
@ -618,28 +660,22 @@ golang.zx2c4.com/wireguard/windows v0.5.3 h1:On6j2Rpn3OEMXqBq00QEDC7bWSZrPIHKIus
|
|||
golang.zx2c4.com/wireguard/windows v0.5.3/go.mod h1:9TEe8TJmtwyQebdFwAkEWOPr3prrtqm+REGFifP60hI=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
|
||||
google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20240205150955-31a09d347014 h1:g/4bk7P6TPMkAUbUhquq98xey1slwvuVJPosdBqYJlU=
|
||||
google.golang.org/genproto v0.0.0-20240205150955-31a09d347014/go.mod h1:xEgQu1e4stdSSsxPDK8Azkrk/ECl5HvdPf6nbZrTS5M=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014 h1:x9PwdEgd11LgK+orcck69WVRo7DezSO4VUMPI4xpc8A=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014/go.mod h1:rbHMSEDyoYX62nRVLOCc4Qt1HbsdytAYoVwgjiOhF3I=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240205150955-31a09d347014 h1:FSL3lRCkhaPFxqi0s9o+V4UI2WTzAVOvkgbd4kVV4Wg=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240205150955-31a09d347014/go.mod h1:SaPjaZGWb0lPqs6Ittu0spdfrOArqji4ZdeP5IC/9N4=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240515191416-fc5f0ca64291 h1:4HZJ3Xv1cmrJ+0aFo304Zn79ur1HMxptAE7aCPNLSqc=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240515191416-fc5f0ca64291/go.mod h1:RGnPtTG7r4i8sPlNyDeikXF99hMM+hN6QMm4ooG9g2g=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240515191416-fc5f0ca64291 h1:AgADTJarZTBqgjiUzRgfaBchgYB3/WFTC80GPwsMcRI=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240515191416-fc5f0ca64291/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
|
||||
google.golang.org/grpc v1.61.0 h1:TOvOcuXn30kRao+gfcvsebNEa5iZIiLkisYEkf7R7o0=
|
||||
google.golang.org/grpc v1.61.0/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I=
|
||||
google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
||||
google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY=
|
||||
google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg=
|
||||
google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
|
||||
google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
|
@ -647,8 +683,6 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN
|
|||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
|
||||
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI=
|
||||
gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
|
@ -658,40 +692,32 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C
|
|||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gorm.io/driver/postgres v1.5.4 h1:Iyrp9Meh3GmbSuyIAGyjkN+n9K+GHX9b9MqsTL4EJCo=
|
||||
gorm.io/driver/postgres v1.5.4/go.mod h1:Bgo89+h0CRcdA33Y6frlaHHVuTdOf87pmyzwW9C/BH0=
|
||||
gorm.io/gorm v1.25.5 h1:zR9lOiiYf09VNh5Q1gphfyia1JpiClIWG9hQaxB/mls=
|
||||
gorm.io/gorm v1.25.5/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8=
|
||||
gorm.io/driver/postgres v1.5.7 h1:8ptbNJTDbEmhdr62uReG5BGkdQyeasu/FZHxI0IMGnM=
|
||||
gorm.io/driver/postgres v1.5.7/go.mod h1:3e019WlBaYI5o5LIdNV+LyxCMNtLOQETBXL2h4chKpA=
|
||||
gorm.io/gorm v1.25.10 h1:dQpO+33KalOA+aFYGlK+EfxcI5MbO7EP2yYygwh9h+s=
|
||||
gorm.io/gorm v1.25.10/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8=
|
||||
gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o=
|
||||
gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g=
|
||||
gvisor.dev/gvisor v0.0.0-20230928000133-4fe30062272c h1:bYb98Ra11fJ8F2xFbZx0zg2VQ28lYqC1JxfaaF53xqY=
|
||||
gvisor.dev/gvisor v0.0.0-20230928000133-4fe30062272c/go.mod h1:AVgIgHMwK63XvmAzWG9vLQ41YnVHN0du0tEC46fI7yY=
|
||||
gvisor.dev/gvisor v0.0.0-20240306221502-ee1e1f6070e3 h1:/8/t5pz/mgdRXhYOIeqqYhFAQLE4DDGegc0Y4ZjyFJM=
|
||||
gvisor.dev/gvisor v0.0.0-20240306221502-ee1e1f6070e3/go.mod h1:NQHVAzMwvZ+Qe3ElSiHmq9RUm1MdNHpUZ52fiEqvn+0=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.4.6 h1:oFEHCKeID7to/3autwsWfnuv69j3NsfcXbvJKuIcep8=
|
||||
honnef.co/go/tools v0.4.6/go.mod h1:+rnGS1THNh8zMwnd2oVOTL9QF6vmfyG6ZXBULae2uc0=
|
||||
honnef.co/go/tools v0.4.7 h1:9MDAWxMoSnB6QoSqiVr7P5mtkT9pOc1kSxchzPCnqJs=
|
||||
honnef.co/go/tools v0.4.7/go.mod h1:+rnGS1THNh8zMwnd2oVOTL9QF6vmfyG6ZXBULae2uc0=
|
||||
howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM=
|
||||
howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g=
|
||||
inet.af/peercred v0.0.0-20210906144145-0893ea02156a h1:qdkS8Q5/i10xU2ArJMKYhVa1DORzBfYS/qA2UK2jheg=
|
||||
inet.af/peercred v0.0.0-20210906144145-0893ea02156a/go.mod h1:FjawnflS/udxX+SvpsMgZfdqx2aykOlkISeAsADi5IU=
|
||||
inet.af/wf v0.0.0-20221017222439-36129f591884 h1:zg9snq3Cpy50lWuVqDYM7AIRVTtU50y5WXETMFohW/Q=
|
||||
inet.af/wf v0.0.0-20221017222439-36129f591884/go.mod h1:bSAQ38BYbY68uwpasXOTZo22dKGy9SNvI6PZFeKomZE=
|
||||
lukechampine.com/uint128 v1.3.0 h1:cDdUVfRwDUDovz610ABgFD17nXD4/uDgVHl2sC3+sbo=
|
||||
lukechampine.com/uint128 v1.3.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
|
||||
modernc.org/cc/v3 v3.41.0 h1:QoR1Sn3YWlmA1T4vLaKZfawdVtSiGx8H+cEojbC7v1Q=
|
||||
modernc.org/cc/v3 v3.41.0/go.mod h1:Ni4zjJYJ04CDOhG7dn640WGfwBzfE0ecX8TyMB0Fv0Y=
|
||||
modernc.org/cc/v4 v4.20.0 h1:45Or8mQfbUqJOG9WaxvlFYOAQO0lQ5RvqBcFCXngjxk=
|
||||
modernc.org/cc/v4 v4.20.0/go.mod h1:HM7VJTZbUCR3rV8EYBi9wxnJ0ZBRiGE5OeGXNA0IsLQ=
|
||||
modernc.org/ccgo/v3 v3.17.0 h1:o3OmOqx4/OFnl4Vm3G8Bgmqxnvxnh0nbxeT5p/dWChA=
|
||||
modernc.org/ccgo/v3 v3.17.0/go.mod h1:Sg3fwVpmLvCUTaqEUjiBDAvshIaKDB0RXaf+zgqFu8I=
|
||||
modernc.org/ccgo/v4 v4.16.0 h1:ofwORa6vx2FMm0916/CkZjpFPSR70VwTjUCe2Eg5BnA=
|
||||
modernc.org/ccgo/v4 v4.16.0/go.mod h1:dkNyWIjFrVIZ68DTo36vHK+6/ShBn4ysU61So6PIqCI=
|
||||
modernc.org/cc/v4 v4.21.2 h1:dycHFB/jDc3IyacKipCNSDrjIC0Lm1hyoWOZTRR20Lk=
|
||||
modernc.org/cc/v4 v4.21.2/go.mod h1:HM7VJTZbUCR3rV8EYBi9wxnJ0ZBRiGE5OeGXNA0IsLQ=
|
||||
modernc.org/ccgo/v4 v4.17.7 h1:+MG+Np7uYtsuPvtoH3KtZ1+pqNiJAOqqqVIxggE1iIo=
|
||||
modernc.org/ccgo/v4 v4.17.7/go.mod h1:x87xuLLXuJv3Nn5ULTUqJn/HsTMMMiT1Eavo6rz1NiY=
|
||||
modernc.org/fileutil v1.3.0 h1:gQ5SIzK3H9kdfai/5x41oQiKValumqNTDXMvKo62HvE=
|
||||
modernc.org/fileutil v1.3.0/go.mod h1:XatxS8fZi3pS8/hKG2GH/ArUogfxjpEKs3Ku3aK4JyQ=
|
||||
modernc.org/gc/v2 v2.4.1 h1:9cNzOqPyMJBvrUipmynX0ZohMhcxPtMccYgGOJdOiBw=
|
||||
modernc.org/gc/v2 v2.4.1/go.mod h1:wzN5dK1AzVGoH6XOzc3YZ+ey/jPgYHLuVckd62P0GYU=
|
||||
modernc.org/libc v1.49.3 h1:j2MRCRdwJI2ls/sGbeSk0t2bypOG/uvPZUsGQFDulqg=
|
||||
modernc.org/libc v1.49.3/go.mod h1:yMZuGkn7pXbKfoT/M35gFJOAEdSKdxL0q64sF7KqCDo=
|
||||
modernc.org/gc/v3 v3.0.0-20240107210532-573471604cb6 h1:5D53IMaUuA5InSeMu9eJtlQXS2NxAhyWQvkKEgXZhHI=
|
||||
modernc.org/gc/v3 v3.0.0-20240107210532-573471604cb6/go.mod h1:Qz0X07sNOR1jWYCrJMEnbW/X55x206Q7Vt4mz6/wHp4=
|
||||
modernc.org/libc v1.50.6 h1:72NPEFMyKP01RJrKXS2eLXv35UklKqlJZ1b9P7gSo6I=
|
||||
modernc.org/libc v1.50.6/go.mod h1:8lr2m1THY5Z3ikGyUc3JhLEQg1oaIBz/AQixw8/eksQ=
|
||||
modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4=
|
||||
modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo=
|
||||
modernc.org/memory v1.8.0 h1:IqGTL6eFMaDZZhEWwcREgeMXYwmW83LYW8cROZYkg+E=
|
||||
|
@ -700,15 +726,15 @@ modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4=
|
|||
modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
|
||||
modernc.org/sortutil v1.2.0 h1:jQiD3PfS2REGJNzNCMMaLSp/wdMNieTbKX920Cqdgqc=
|
||||
modernc.org/sortutil v1.2.0/go.mod h1:TKU2s7kJMf1AE84OoiGppNHJwvB753OYfNl2WRb++Ss=
|
||||
modernc.org/sqlite v1.28.0 h1:Zx+LyDDmXczNnEQdvPuEfcFVA2ZPyaD7UCZDjef3BHQ=
|
||||
modernc.org/sqlite v1.28.0/go.mod h1:Qxpazz0zH8Z1xCFyi5GSL3FzbtZ3fvbjmywNogldEW0=
|
||||
modernc.org/sqlite v1.29.9 h1:9RhNMklxJs+1596GNuAX+O/6040bvOwacTxuFcRuQow=
|
||||
modernc.org/sqlite v1.29.9/go.mod h1:ItX2a1OVGgNsFh6Dv60JQvGfJfTPHPVpV6DF59akYOA=
|
||||
modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA=
|
||||
modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0=
|
||||
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
|
||||
modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
|
||||
nhooyr.io/websocket v1.8.10 h1:mv4p+MnGrLDcPlBoWsvPP7XCzTYMXP9F9eIGoKbgx7Q=
nhooyr.io/websocket v1.8.10/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c=
software.sslmate.com/src/go-pkcs12 v0.2.1 h1:tbT1jjaeFOF230tzOIRJ6U5S1jNqpsSyNjzDd58H3J8=
software.sslmate.com/src/go-pkcs12 v0.2.1/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI=
tailscale.com v1.58.2 h1:5trkhh/fpUn7f6TUcGUQYJ0GokdNNfNrjh9ONJhoc5A=
tailscale.com v1.58.2/go.mod h1:faWR8XaXemnSKCDjHC7SAQzaagkUjA5x4jlLWiwxtuk=
software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k=
software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI=
tailscale.com v1.66.3 h1:jpWat+hiobTtCosSV/c8D6S/ubgROf/S59MaIBdM9pY=
tailscale.com v1.66.3/go.mod h1:99BIV4U3UPw36Sva04xK2ZsEpVRUkY9jCdEDSAhaNGM=
248
hscontrol/app.go
@ -9,7 +9,6 @@ import (
|
|||
"net"
|
||||
"net/http"
|
||||
_ "net/http/pprof" //nolint
|
||||
"net/netip"
|
||||
"os"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
|
@ -20,6 +19,7 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/coreos/go-oidc/v3/oidc"
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
"github.com/gorilla/mux"
|
||||
grpcMiddleware "github.com/grpc-ecosystem/go-grpc-middleware"
|
||||
grpcRuntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
|
||||
|
@ -28,6 +28,7 @@ import (
|
|||
"github.com/juanfont/headscale/hscontrol/db"
|
||||
"github.com/juanfont/headscale/hscontrol/derp"
|
||||
derpServer "github.com/juanfont/headscale/hscontrol/derp/server"
|
||||
"github.com/juanfont/headscale/hscontrol/mapper"
|
||||
"github.com/juanfont/headscale/hscontrol/notifier"
|
||||
"github.com/juanfont/headscale/hscontrol/policy"
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
|
@ -55,6 +56,7 @@ import (
|
|||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/dnstype"
|
||||
"tailscale.com/types/key"
|
||||
"tailscale.com/util/dnsname"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -69,7 +71,7 @@ var (

const (
	AuthPrefix = "Bearer "
	updateInterval = 5000
	updateInterval = 5 * time.Second
	privateKeyFileMode = 0o600
	headscaleDirPerm = 0o700
@ -77,6 +79,11 @@ const (
	registerCacheCleanup = time.Minute * 20
)

// func init() {
// deadlock.Opts.DeadlockTimeout = 15 * time.Second
// deadlock.Opts.PrintAllCurrentGoroutines = true
// }

// Headscale represents the base app of the service.
type Headscale struct {
	cfg *types.Config
@ -89,6 +96,7 @@ type Headscale struct {

	ACLPolicy *policy.ACLPolicy

	mapper *mapper.Mapper
	nodeNotifier *notifier.Notifier

	oidcProvider *oidc.Provider
@ -96,15 +104,16 @@ type Headscale struct {

	registrationCache *cache.Cache

	shutdownChan chan struct{}
	pollNetMapStreamWG sync.WaitGroup
}

var (
	profilingEnabled = envknob.Bool("HEADSCALE_PROFILING_ENABLED")
	profilingEnabled = envknob.Bool("HEADSCALE_DEBUG_PROFILING_ENABLED")
	profilingPath = envknob.String("HEADSCALE_DEBUG_PROFILING_PATH")
	tailsqlEnabled = envknob.Bool("HEADSCALE_DEBUG_TAILSQL_ENABLED")
	tailsqlStateDir = envknob.String("HEADSCALE_DEBUG_TAILSQL_STATE_DIR")
	tailsqlTSKey = envknob.String("TS_AUTHKEY")
	dumpConfig = envknob.Bool("HEADSCALE_DEBUG_DUMP_CONFIG")
)

func NewHeadscale(cfg *types.Config) (*Headscale, error) {
|
||||
|
@ -128,7 +137,7 @@ func NewHeadscale(cfg *types.Config) (*Headscale, error) {
|
|||
noisePrivateKey: noisePrivateKey,
|
||||
registrationCache: registrationCache,
|
||||
pollNetMapStreamWG: sync.WaitGroup{},
|
||||
nodeNotifier: notifier.NewNotifier(),
|
||||
nodeNotifier: notifier.NewNotifier(cfg),
|
||||
}
|
||||
|
||||
app.db, err = db.NewHeadscaleDatabase(
|
||||
|
@ -138,7 +147,7 @@ func NewHeadscale(cfg *types.Config) (*Headscale, error) {
|
|||
return nil, err
|
||||
}
|
||||
|
||||
app.ipAlloc, err = db.NewIPAllocator(app.db, *cfg.PrefixV4, *cfg.PrefixV6)
|
||||
app.ipAlloc, err = db.NewIPAllocator(app.db, cfg.PrefixV4, cfg.PrefixV6, cfg.IPAllocation)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -156,7 +165,15 @@ func NewHeadscale(cfg *types.Config) (*Headscale, error) {
|
|||
|
||||
if app.cfg.DNSConfig != nil && app.cfg.DNSConfig.Proxied { // if MagicDNS
|
||||
// TODO(kradalby): revisit why this takes a list.
|
||||
magicDNSDomains := util.GenerateMagicDNSRootDomains([]netip.Prefix{*cfg.PrefixV4, *cfg.PrefixV6})
|
||||
|
||||
var magicDNSDomains []dnsname.FQDN
|
||||
if cfg.PrefixV4 != nil {
|
||||
magicDNSDomains = append(magicDNSDomains, util.GenerateIPv4DNSRootDomain(*cfg.PrefixV4)...)
|
||||
}
|
||||
if cfg.PrefixV6 != nil {
|
||||
magicDNSDomains = append(magicDNSDomains, util.GenerateIPv6DNSRootDomain(*cfg.PrefixV6)...)
|
||||
}
|
||||
|
||||
// we might have routes already from Split DNS
|
||||
if app.cfg.DNSConfig.Routes == nil {
|
||||
app.cfg.DNSConfig.Routes = make(map[string][]*dnstype.Resolver)
|
||||
|
@ -199,54 +216,77 @@ func (h *Headscale) redirect(w http.ResponseWriter, req *http.Request) {
http.Redirect(w, req, target, http.StatusFound)
}

// expireEphemeralNodes deletes ephemeral node records that have not been
// deleteExpireEphemeralNodes deletes ephemeral node records that have not been
// seen for longer than h.cfg.EphemeralNodeInactivityTimeout.
func (h *Headscale) expireEphemeralNodes(milliSeconds int64) {
ticker := time.NewTicker(time.Duration(milliSeconds) * time.Millisecond)
func (h *Headscale) deleteExpireEphemeralNodes(ctx context.Context, every time.Duration) {
ticker := time.NewTicker(every)

var update types.StateUpdate
var changed bool
for range ticker.C {
if err := h.db.DB.Transaction(func(tx *gorm.DB) error {
update, changed = db.ExpireEphemeralNodes(tx, h.cfg.EphemeralNodeInactivityTimeout)
for {
select {
case <-ctx.Done():
ticker.Stop()
return
case <-ticker.C:
var removed []types.NodeID
var changed []types.NodeID
if err := h.db.Write(func(tx *gorm.DB) error {
removed, changed = db.DeleteExpiredEphemeralNodes(tx, h.cfg.EphemeralNodeInactivityTimeout)

return nil
}); err != nil {
log.Error().Err(err).Msg("database error while expiring ephemeral nodes")
continue
}
return nil
}); err != nil {
log.Error().Err(err).Msg("database error while expiring ephemeral nodes")
continue
}

if changed && update.Valid() {
ctx := types.NotifyCtx(context.Background(), "expire-ephemeral", "na")
h.nodeNotifier.NotifyAll(ctx, update)
if removed != nil {
ctx := types.NotifyCtx(context.Background(), "expire-ephemeral", "na")
h.nodeNotifier.NotifyAll(ctx, types.StateUpdate{
Type: types.StatePeerRemoved,
Removed: removed,
})
}

if changed != nil {
ctx := types.NotifyCtx(context.Background(), "expire-ephemeral", "na")
h.nodeNotifier.NotifyAll(ctx, types.StateUpdate{
Type: types.StatePeerChanged,
ChangeNodes: changed,
})
}
}
}
}
|
||||
// expireExpiredMachines expires nodes that have an explicit expiry set
|
||||
// expireExpiredNodes expires nodes that have an explicit expiry set
|
||||
// after that expiry time has passed.
|
||||
func (h *Headscale) expireExpiredMachines(intervalMs int64) {
|
||||
interval := time.Duration(intervalMs) * time.Millisecond
|
||||
ticker := time.NewTicker(interval)
|
||||
func (h *Headscale) expireExpiredNodes(ctx context.Context, every time.Duration) {
|
||||
ticker := time.NewTicker(every)
|
||||
|
||||
lastCheck := time.Unix(0, 0)
|
||||
var update types.StateUpdate
|
||||
var changed bool
|
||||
|
||||
for range ticker.C {
|
||||
if err := h.db.DB.Transaction(func(tx *gorm.DB) error {
|
||||
lastCheck, update, changed = db.ExpireExpiredNodes(tx, lastCheck)
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
ticker.Stop()
|
||||
return
|
||||
case <-ticker.C:
|
||||
if err := h.db.Write(func(tx *gorm.DB) error {
|
||||
lastCheck, update, changed = db.ExpireExpiredNodes(tx, lastCheck)
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
log.Error().Err(err).Msg("database error while expiring nodes")
|
||||
continue
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
log.Error().Err(err).Msg("database error while expiring nodes")
|
||||
continue
|
||||
}
|
||||
|
||||
log.Trace().Str("nodes", update.ChangeNodes.String()).Msgf("expiring nodes")
|
||||
if changed && update.Valid() {
|
||||
ctx := types.NotifyCtx(context.Background(), "expire-expired", "na")
|
||||
h.nodeNotifier.NotifyAll(ctx, update)
|
||||
if changed {
|
||||
log.Trace().Interface("nodes", update.ChangePatches).Msgf("expiring nodes")
|
||||
|
||||
ctx := types.NotifyCtx(context.Background(), "expire-expired", "na")
|
||||
h.nodeNotifier.NotifyAll(ctx, update)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
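Both expiry workers in this hunk share one shape after the change: a ticker loop wrapped in a `select` on `ctx.Done()`, so `Serve()` can stop them through a `context.CancelFunc` instead of leaking goroutines until process exit. The sketch below shows that pattern in isolation; it is an illustration rather than headscale code, and the `work` callback and the intervals are placeholders.

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// runEvery calls work on a fixed interval until ctx is cancelled, then
// stops the ticker and returns. This is the same shape the expiry
// goroutines take after this change.
func runEvery(ctx context.Context, every time.Duration, work func()) {
	ticker := time.NewTicker(every)
	defer ticker.Stop()

	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			work()
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())

	go runEvery(ctx, 500*time.Millisecond, func() {
		fmt.Println("expiring stale records (placeholder work)")
	})

	time.Sleep(2 * time.Second)
	cancel() // analogous to expireEphemeralCancel/expireNodeCancel in Serve()
	time.Sleep(100 * time.Millisecond)
}
```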
|
@ -272,14 +312,11 @@ func (h *Headscale) scheduledDERPMapUpdateWorker(cancelChan <-chan struct{}) {
|
|||
h.DERPMap.Regions[region.RegionID] = ®ion
|
||||
}
|
||||
|
||||
stateUpdate := types.StateUpdate{
|
||||
ctx := types.NotifyCtx(context.Background(), "derpmap-update", "na")
|
||||
h.nodeNotifier.NotifyAll(ctx, types.StateUpdate{
|
||||
Type: types.StateDERPUpdated,
|
||||
DERPMap: h.DERPMap,
|
||||
}
|
||||
if stateUpdate.Valid() {
|
||||
ctx := types.NotifyCtx(context.Background(), "derpmap-update", "na")
|
||||
h.nodeNotifier.NotifyAll(ctx, stateUpdate)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -292,7 +329,7 @@ func (h *Headscale) grpcAuthenticationInterceptor(ctx context.Context,
|
|||
// Check if the request is coming from the on-server client.
|
||||
// This is not secure, but it is to maintain maintainability
|
||||
// with the "legacy" database-based client
|
||||
// It is also neede for grpc-gateway to be able to connect to
|
||||
// It is also needed for grpc-gateway to be able to connect to
|
||||
// the server
|
||||
client, _ := peer.FromContext(ctx)
|
||||
|
||||
|
@ -303,11 +340,6 @@ func (h *Headscale) grpcAuthenticationInterceptor(ctx context.Context,
|
|||
|
||||
meta, ok := metadata.FromIncomingContext(ctx)
|
||||
if !ok {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("client_address", client.Addr.String()).
|
||||
Msg("Retrieving metadata is failed")
|
||||
|
||||
return ctx, status.Errorf(
|
||||
codes.InvalidArgument,
|
||||
"Retrieving metadata is failed",
|
||||
|
@ -316,11 +348,6 @@ func (h *Headscale) grpcAuthenticationInterceptor(ctx context.Context,
|
|||
|
||||
authHeader, ok := meta["authorization"]
|
||||
if !ok {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("client_address", client.Addr.String()).
|
||||
Msg("Authorization token is not supplied")
|
||||
|
||||
return ctx, status.Errorf(
|
||||
codes.Unauthenticated,
|
||||
"Authorization token is not supplied",
|
||||
|
@ -330,11 +357,6 @@ func (h *Headscale) grpcAuthenticationInterceptor(ctx context.Context,
|
|||
token := authHeader[0]
|
||||
|
||||
if !strings.HasPrefix(token, AuthPrefix) {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("client_address", client.Addr.String()).
|
||||
Msg(`missing "Bearer " prefix in "Authorization" header`)
|
||||
|
||||
return ctx, status.Error(
|
||||
codes.Unauthenticated,
|
||||
`missing "Bearer " prefix in "Authorization" header`,
|
||||
|
@ -343,12 +365,6 @@ func (h *Headscale) grpcAuthenticationInterceptor(ctx context.Context,
|
|||
|
||||
valid, err := h.db.ValidateAPIKey(strings.TrimPrefix(token, AuthPrefix))
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Str("client_address", client.Addr.String()).
|
||||
Msg("failed to validate token")
|
||||
|
||||
return ctx, status.Error(codes.Internal, "failed to validate token")
|
||||
}
|
||||
|
||||
|
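The interceptor hunks above mostly strip repeated log statements, but the control flow they leave behind is the usual token check for a gRPC unary interceptor: read the incoming metadata, require an `Authorization` header with the `Bearer ` prefix, then validate the key. A minimal, self-contained sketch of that flow follows; `validate` is a hypothetical stand-in for headscale's `h.db.ValidateAPIKey`, and the key in `main` is made up.

```go
package main

import (
	"context"
	"strings"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/status"
)

const authPrefix = "Bearer "

// authInterceptor returns a unary interceptor that requires a valid bearer
// token before letting the request through. validate is a hypothetical
// stand-in for a real API-key lookup.
func authInterceptor(validate func(string) (bool, error)) grpc.UnaryServerInterceptor {
	return func(
		ctx context.Context,
		req interface{},
		info *grpc.UnaryServerInfo,
		handler grpc.UnaryHandler,
	) (interface{}, error) {
		meta, ok := metadata.FromIncomingContext(ctx)
		if !ok {
			return nil, status.Error(codes.InvalidArgument, "missing metadata")
		}

		authHeader := meta.Get("authorization")
		if len(authHeader) == 0 {
			return nil, status.Error(codes.Unauthenticated, "authorization token is not supplied")
		}

		token := authHeader[0]
		if !strings.HasPrefix(token, authPrefix) {
			return nil, status.Error(codes.Unauthenticated, `missing "Bearer " prefix in "Authorization" header`)
		}

		valid, err := validate(strings.TrimPrefix(token, authPrefix))
		if err != nil {
			return nil, status.Error(codes.Internal, "failed to validate token")
		}
		if !valid {
			return nil, status.Error(codes.Unauthenticated, "invalid token")
		}

		return handler(ctx, req)
	}
}

func main() {
	// Wire the interceptor into a server; the key check here is a placeholder.
	_ = grpc.NewServer(grpc.UnaryInterceptor(
		authInterceptor(func(token string) (bool, error) {
			return token == "example-api-key", nil
		}),
	))
}
```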
@ -446,7 +462,7 @@ func (h *Headscale) ensureUnixSocketIsAbsent() error {
|
|||
|
||||
func (h *Headscale) createRouter(grpcMux *grpcRuntime.ServeMux) *mux.Router {
|
||||
router := mux.NewRouter()
|
||||
router.PathPrefix("/debug/pprof/").Handler(http.DefaultServeMux)
|
||||
router.Use(prometheusMiddleware)
|
||||
|
||||
router.HandleFunc(ts2021UpgradePath, h.NoiseUpgradeHandler).Methods(http.MethodPost)
|
||||
|
||||
|
@ -483,16 +499,16 @@ func (h *Headscale) createRouter(grpcMux *grpcRuntime.ServeMux) *mux.Router {
return router
}

// Serve launches a GIN server with the Headscale API.
// Serve launches the HTTP and gRPC server service Headscale and the API.
func (h *Headscale) Serve() error {
if _, enableProfile := os.LookupEnv("HEADSCALE_PROFILING_ENABLED"); enableProfile {
if profilePath, ok := os.LookupEnv("HEADSCALE_PROFILING_PATH"); ok {
err := os.MkdirAll(profilePath, os.ModePerm)
if profilingEnabled {
if profilingPath != "" {
err := os.MkdirAll(profilingPath, os.ModePerm)
if err != nil {
log.Fatal().Err(err).Msg("failed to create profiling directory")
}

defer profile.Start(profile.ProfilePath(profilePath)).Stop()
defer profile.Start(profile.ProfilePath(profilingPath)).Stop()
} else {
defer profile.Start().Stop()
}
|
@ -500,8 +516,13 @@ func (h *Headscale) Serve() error {
|
|||
|
||||
var err error
|
||||
|
||||
if dumpConfig {
|
||||
spew.Dump(h.cfg)
|
||||
}
|
||||
|
||||
// Fetch an initial DERP Map before we start serving
|
||||
h.DERPMap = derp.GetDERPMap(h.cfg.DERP)
|
||||
h.mapper = mapper.NewMapper(h.db, h.cfg, h.DERPMap, h.nodeNotifier)
|
||||
|
||||
if h.cfg.DERP.ServerEnabled {
|
||||
// When embedded DERP is enabled we always need a STUN server
|
||||
|
@ -511,7 +532,7 @@ func (h *Headscale) Serve() error {
|
|||
|
||||
region, err := h.DERPServer.GenerateRegion()
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("generating DERP region for embedded server: %w", err)
|
||||
}
|
||||
|
||||
if h.cfg.DERP.AutomaticallyAddEmbeddedDerpRegion {
|
||||
|
@ -531,10 +552,13 @@ func (h *Headscale) Serve() error {
|
|||
return errEmptyInitialDERPMap
|
||||
}
|
||||
|
||||
// TODO(kradalby): These should have cancel channels and be cleaned
|
||||
// up on shutdown.
|
||||
go h.expireEphemeralNodes(updateInterval)
|
||||
go h.expireExpiredMachines(updateInterval)
|
||||
expireEphemeralCtx, expireEphemeralCancel := context.WithCancel(context.Background())
|
||||
defer expireEphemeralCancel()
|
||||
go h.deleteExpireEphemeralNodes(expireEphemeralCtx, updateInterval)
|
||||
|
||||
expireNodeCtx, expireNodeCancel := context.WithCancel(context.Background())
|
||||
defer expireNodeCancel()
|
||||
go h.expireExpiredNodes(expireNodeCtx, updateInterval)
|
||||
|
||||
if zl.GlobalLevel() == zl.TraceLevel {
|
||||
zerolog.RespLog = true
|
||||
|
@ -586,14 +610,14 @@ func (h *Headscale) Serve() error {
|
|||
}...,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("setting up gRPC gateway via socket: %w", err)
|
||||
}
|
||||
|
||||
// Connect to the gRPC server over localhost to skip
|
||||
// the authentication.
|
||||
err = v1.RegisterHeadscaleServiceHandler(ctx, grpcGatewayMux, grpcGatewayConn)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("registering Headscale API service to gRPC: %w", err)
|
||||
}
|
||||
|
||||
// Start the local gRPC server without TLS and without authentication
|
||||
|
@ -614,9 +638,7 @@ func (h *Headscale) Serve() error {
|
|||
|
||||
tlsConfig, err := h.getTLSSettings()
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("Failed to set up TLS configuration")
|
||||
|
||||
return err
|
||||
return fmt.Errorf("configuring TLS settings: %w", err)
|
||||
}
|
||||
|
||||
//
|
||||
|
@ -693,18 +715,17 @@ func (h *Headscale) Serve() error {
// HTTP setup
//
// This is the regular router that we expose
// over our main Addr. It also serves the legacy Tailcale API
// over our main Addr
router := h.createRouter(grpcGatewayMux)

httpServer := &http.Server{
Addr: h.cfg.Addr,
Handler: router,
ReadTimeout: types.HTTPReadTimeout,
// Go does not handle timeouts in HTTP very well, and there is
// no good way to handle streaming timeouts, therefore we need to
// keep this at unlimited and be careful to clean up connections
// https://blog.cloudflare.com/the-complete-guide-to-golang-net-http-timeouts/#aboutstreaming
WriteTimeout: 0,
ReadTimeout: types.HTTPTimeout,

// Long polling should not have any timeout, this is overriden
// further down the chain
WriteTimeout: types.HTTPTimeout,
}
|
||||
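The comments in this hunk explain the constraint: a single server-wide `WriteTimeout` either cuts off long-polling map streams (if finite) or protects nothing (if zero), so the deadline has to be adjusted per request further down the chain. The sketch below shows one standard-library way to do that with `http.NewResponseController`; it is a generic example with made-up addresses and durations, not a claim about how headscale performs the override.

```go
package main

import (
	"io"
	"log"
	"net/http"
	"time"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/poll", func(w http.ResponseWriter, r *http.Request) {
		// Streaming/long-poll handler: lift the server-wide write deadline
		// for this one response so the stream is not cut off mid-flight.
		rc := http.NewResponseController(w)
		_ = rc.SetWriteDeadline(time.Time{}) // zero time = no deadline

		for i := 0; i < 5; i++ {
			io.WriteString(w, "tick\n")
			_ = rc.Flush()
			time.Sleep(time.Second)
		}
	})

	srv := &http.Server{
		Addr:    "127.0.0.1:8099", // illustrative address and durations
		Handler: mux,
		// Bound how long reading the request may take.
		ReadTimeout: 30 * time.Second,
		// A finite write timeout protects ordinary handlers; streaming
		// handlers opt out per request as shown above.
		WriteTimeout: 30 * time.Second,
	}

	log.Fatal(srv.ListenAndServe())
}
```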
var httpListener net.Listener
|
||||
|
@ -723,27 +744,30 @@ func (h *Headscale) Serve() error {
|
|||
log.Info().
|
||||
Msgf("listening and serving HTTP on: %s", h.cfg.Addr)
|
||||
|
||||
promMux := http.NewServeMux()
|
||||
promMux.Handle("/metrics", promhttp.Handler())
|
||||
debugMux := http.NewServeMux()
|
||||
debugMux.Handle("/debug/pprof/", http.DefaultServeMux)
|
||||
debugMux.HandleFunc("/debug/notifier", func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write([]byte(h.nodeNotifier.String()))
|
||||
})
|
||||
debugMux.Handle("/metrics", promhttp.Handler())
|
||||
|
||||
promHTTPServer := &http.Server{
|
||||
debugHTTPServer := &http.Server{
|
||||
Addr: h.cfg.MetricsAddr,
|
||||
Handler: promMux,
|
||||
ReadTimeout: types.HTTPReadTimeout,
|
||||
Handler: debugMux,
|
||||
ReadTimeout: types.HTTPTimeout,
|
||||
WriteTimeout: 0,
|
||||
}
|
||||
|
||||
var promHTTPListener net.Listener
|
||||
promHTTPListener, err = net.Listen("tcp", h.cfg.MetricsAddr)
|
||||
|
||||
debugHTTPListener, err := net.Listen("tcp", h.cfg.MetricsAddr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to bind to TCP address: %w", err)
|
||||
}
|
||||
|
||||
errorGroup.Go(func() error { return promHTTPServer.Serve(promHTTPListener) })
|
||||
errorGroup.Go(func() error { return debugHTTPServer.Serve(debugHTTPListener) })
|
||||
|
||||
log.Info().
|
||||
Msgf("listening and serving metrics on: %s", h.cfg.MetricsAddr)
|
||||
Msgf("listening and serving debug and metrics on: %s", h.cfg.MetricsAddr)
|
||||
|
||||
var tailsqlContext context.Context
|
||||
if tailsqlEnabled {
|
||||
|
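This hunk folds the Prometheus endpoint, pprof, and a new `/debug/notifier` page into one debug mux served on `MetricsAddr`, keeping operational endpoints off the public listener. The snippet below is a generic sketch of that layout; the address and the notifier body are placeholders, not headscale defaults.

```go
package main

import (
	"log"
	"net/http"
	_ "net/http/pprof" // registers /debug/pprof/* on http.DefaultServeMux

	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	debugMux := http.NewServeMux()
	// pprof registers on the default mux; re-exposing it here keeps the
	// debug endpoints on the private debug/metrics listener only.
	debugMux.Handle("/debug/pprof/", http.DefaultServeMux)
	debugMux.Handle("/metrics", promhttp.Handler())
	debugMux.HandleFunc("/debug/notifier", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
		w.Write([]byte("notifier state would be rendered here")) // placeholder body
	})

	// 127.0.0.1:9090 is an illustrative address, not headscale's default.
	log.Fatal(http.ListenAndServe("127.0.0.1:9090", debugMux))
}
```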
@ -760,7 +784,6 @@ func (h *Headscale) Serve() error {
|
|||
}
|
||||
|
||||
// Handle common process-killing signals so we can gracefully shut down:
|
||||
h.shutdownChan = make(chan struct{})
|
||||
sigc := make(chan os.Signal, 1)
|
||||
signal.Notify(sigc,
|
||||
syscall.SIGHUP,
|
||||
|
@ -799,12 +822,15 @@ func (h *Headscale) Serve() error {
|
|||
}
|
||||
|
||||
default:
|
||||
trace := log.Trace().Msgf
|
||||
log.Info().
|
||||
Str("signal", sig.String()).
|
||||
Msg("Received signal to stop, shutting down gracefully")
|
||||
|
||||
close(h.shutdownChan)
|
||||
expireNodeCancel()
|
||||
expireEphemeralCancel()
|
||||
|
||||
trace("waiting for netmap stream to close")
|
||||
h.pollNetMapStreamWG.Wait()
|
||||
|
||||
// Gracefully shut down servers
|
||||
|
@ -812,32 +838,44 @@ func (h *Headscale) Serve() error {
|
|||
context.Background(),
|
||||
types.HTTPShutdownTimeout,
|
||||
)
|
||||
if err := promHTTPServer.Shutdown(ctx); err != nil {
|
||||
trace("shutting down debug http server")
|
||||
if err := debugHTTPServer.Shutdown(ctx); err != nil {
|
||||
log.Error().Err(err).Msg("Failed to shutdown prometheus http")
|
||||
}
|
||||
trace("shutting down main http server")
|
||||
if err := httpServer.Shutdown(ctx); err != nil {
|
||||
log.Error().Err(err).Msg("Failed to shutdown http")
|
||||
}
|
||||
|
||||
trace("shutting down grpc server (socket)")
|
||||
grpcSocket.GracefulStop()
|
||||
|
||||
if grpcServer != nil {
|
||||
trace("shutting down grpc server (external)")
|
||||
grpcServer.GracefulStop()
|
||||
grpcListener.Close()
|
||||
}
|
||||
|
||||
if tailsqlContext != nil {
|
||||
trace("shutting down tailsql")
|
||||
tailsqlContext.Done()
|
||||
}
|
||||
|
||||
trace("closing node notifier")
|
||||
h.nodeNotifier.Close()
|
||||
|
||||
// Close network listeners
|
||||
promHTTPListener.Close()
|
||||
trace("closing network listeners")
|
||||
debugHTTPListener.Close()
|
||||
httpListener.Close()
|
||||
grpcGatewayConn.Close()
|
||||
|
||||
// Stop listening (and unlink the socket if unix type):
|
||||
trace("closing socket listener")
|
||||
socketListener.Close()
|
||||
|
||||
// Close db connections
|
||||
trace("closing database connection")
|
||||
err = h.db.Close()
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("Failed to close db")
|
||||
|
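The shutdown branch above now cancels the expiry workers, waits for the netmap streams, and then tears the servers and listeners down in order, with trace logging between steps. A compact sketch of the same sequencing idea is below — catch the signal, stop background work, then give in-flight HTTP requests a bounded window via `Shutdown`; the address and timeout are illustrative only.

```go
package main

import (
	"context"
	"log"
	"net/http"
	"os"
	"os/signal"
	"syscall"
	"time"
)

func main() {
	srv := &http.Server{Addr: "127.0.0.1:8099"} // illustrative address

	// Background workers get a cancellable context, analogous to the
	// expireEphemeralCancel/expireNodeCancel pair in Serve().
	workerCtx, stopWorkers := context.WithCancel(context.Background())
	defer stopWorkers()
	go func() {
		<-workerCtx.Done()
		log.Println("worker stopped")
	}()

	go func() {
		if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
			log.Fatal(err)
		}
	}()

	sigc := make(chan os.Signal, 1)
	signal.Notify(sigc, syscall.SIGTERM, syscall.SIGINT)
	<-sigc

	stopWorkers() // stop periodic jobs before tearing down listeners

	// Give in-flight requests a bounded window to finish.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	if err := srv.Shutdown(ctx); err != nil {
		log.Printf("graceful shutdown: %v", err)
	}
}
```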
@ -895,7 +933,7 @@ func (h *Headscale) getTLSSettings() (*tls.Config, error) {
|
|||
server := &http.Server{
|
||||
Addr: h.cfg.TLS.LetsEncrypt.Listen,
|
||||
Handler: certManager.HTTPHandler(http.HandlerFunc(h.redirect)),
|
||||
ReadTimeout: types.HTTPReadTimeout,
|
||||
ReadTimeout: types.HTTPTimeout,
|
||||
}
|
||||
|
||||
go func() {
|
||||
|
|
|
@ -62,18 +62,18 @@ func logAuthFunc(
|
|||
func (h *Headscale) handleRegister(
|
||||
writer http.ResponseWriter,
|
||||
req *http.Request,
|
||||
registerRequest tailcfg.RegisterRequest,
|
||||
regReq tailcfg.RegisterRequest,
|
||||
machineKey key.MachinePublic,
|
||||
) {
|
||||
logInfo, logTrace, logErr := logAuthFunc(registerRequest, machineKey)
|
||||
logInfo, logTrace, logErr := logAuthFunc(regReq, machineKey)
|
||||
now := time.Now().UTC()
|
||||
logTrace("handleRegister called, looking up machine in DB")
|
||||
node, err := h.db.GetNodeByAnyKey(machineKey, registerRequest.NodeKey, registerRequest.OldNodeKey)
|
||||
node, err := h.db.GetNodeByAnyKey(machineKey, regReq.NodeKey, regReq.OldNodeKey)
|
||||
logTrace("handleRegister database lookup has returned")
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
// If the node has AuthKey set, handle registration via PreAuthKeys
|
||||
if registerRequest.Auth.AuthKey != "" {
|
||||
h.handleAuthKey(writer, registerRequest, machineKey)
|
||||
if regReq.Auth != nil && regReq.Auth.AuthKey != "" {
|
||||
h.handleAuthKey(writer, regReq, machineKey)
|
||||
|
||||
return
|
||||
}
|
||||
|
@ -86,7 +86,7 @@ func (h *Headscale) handleRegister(
|
|||
// This is not implemented yet, as it is no strictly required. The only side-effect
|
||||
// is that the client will hammer headscale with requests until it gets a
|
||||
// successful RegisterResponse.
|
||||
if registerRequest.Followup != "" {
|
||||
if regReq.Followup != "" {
|
||||
logTrace("register request is a followup")
|
||||
if _, ok := h.registrationCache.Get(machineKey.String()); ok {
|
||||
logTrace("Node is waiting for interactive login")
|
||||
|
@ -95,7 +95,7 @@ func (h *Headscale) handleRegister(
|
|||
case <-req.Context().Done():
|
||||
return
|
||||
case <-time.After(registrationHoldoff):
|
||||
h.handleNewNode(writer, registerRequest, machineKey)
|
||||
h.handleNewNode(writer, regReq, machineKey)
|
||||
|
||||
return
|
||||
}
|
||||
|
@ -106,7 +106,7 @@ func (h *Headscale) handleRegister(
|
|||
|
||||
givenName, err := h.db.GenerateGivenName(
|
||||
machineKey,
|
||||
registerRequest.Hostinfo.Hostname,
|
||||
regReq.Hostinfo.Hostname,
|
||||
)
|
||||
if err != nil {
|
||||
logErr(err, "Failed to generate given name for node")
|
||||
|
@ -120,16 +120,16 @@ func (h *Headscale) handleRegister(
|
|||
// happens
|
||||
newNode := types.Node{
|
||||
MachineKey: machineKey,
|
||||
Hostname: registerRequest.Hostinfo.Hostname,
|
||||
Hostname: regReq.Hostinfo.Hostname,
|
||||
GivenName: givenName,
|
||||
NodeKey: registerRequest.NodeKey,
|
||||
NodeKey: regReq.NodeKey,
|
||||
LastSeen: &now,
|
||||
Expiry: &time.Time{},
|
||||
}
|
||||
|
||||
if !registerRequest.Expiry.IsZero() {
|
||||
if !regReq.Expiry.IsZero() {
|
||||
logTrace("Non-zero expiry time requested")
|
||||
newNode.Expiry = ®isterRequest.Expiry
|
||||
newNode.Expiry = ®Req.Expiry
|
||||
}
|
||||
|
||||
h.registrationCache.Set(
|
||||
|
@ -138,7 +138,7 @@ func (h *Headscale) handleRegister(
|
|||
registerCacheExpiration,
|
||||
)
|
||||
|
||||
h.handleNewNode(writer, registerRequest, machineKey)
|
||||
h.handleNewNode(writer, regReq, machineKey)
|
||||
|
||||
return
|
||||
}
|
||||
|
@ -169,11 +169,11 @@ func (h *Headscale) handleRegister(
|
|||
// - Trying to log out (sending a expiry in the past)
|
||||
// - A valid, registered node, looking for /map
|
||||
// - Expired node wanting to reauthenticate
|
||||
if node.NodeKey.String() == registerRequest.NodeKey.String() {
|
||||
if node.NodeKey.String() == regReq.NodeKey.String() {
|
||||
// The client sends an Expiry in the past if the client is requesting to expire the key (aka logout)
|
||||
// https://github.com/tailscale/tailscale/blob/main/tailcfg/tailcfg.go#L648
|
||||
if !registerRequest.Expiry.IsZero() &&
|
||||
registerRequest.Expiry.UTC().Before(now) {
|
||||
if !regReq.Expiry.IsZero() &&
|
||||
regReq.Expiry.UTC().Before(now) {
|
||||
h.handleNodeLogOut(writer, *node, machineKey)
|
||||
|
||||
return
|
||||
|
@ -189,11 +189,11 @@ func (h *Headscale) handleRegister(
|
|||
}
|
||||
|
||||
// The NodeKey we have matches OldNodeKey, which means this is a refresh after a key expiration
|
||||
if node.NodeKey.String() == registerRequest.OldNodeKey.String() &&
|
||||
if node.NodeKey.String() == regReq.OldNodeKey.String() &&
|
||||
!node.IsExpired() {
|
||||
h.handleNodeKeyRefresh(
|
||||
writer,
|
||||
registerRequest,
|
||||
regReq,
|
||||
*node,
|
||||
machineKey,
|
||||
)
|
||||
|
@ -202,11 +202,11 @@ func (h *Headscale) handleRegister(
|
|||
}
|
||||
|
||||
// When logged out and reauthenticating with OIDC, the OldNodeKey is not passed, but the NodeKey has changed
|
||||
if node.NodeKey.String() != registerRequest.NodeKey.String() &&
|
||||
registerRequest.OldNodeKey.IsZero() && !node.IsExpired() {
|
||||
if node.NodeKey.String() != regReq.NodeKey.String() &&
|
||||
regReq.OldNodeKey.IsZero() && !node.IsExpired() {
|
||||
h.handleNodeKeyRefresh(
|
||||
writer,
|
||||
registerRequest,
|
||||
regReq,
|
||||
*node,
|
||||
machineKey,
|
||||
)
|
||||
|
@ -214,7 +214,7 @@ func (h *Headscale) handleRegister(
|
|||
return
|
||||
}
|
||||
|
||||
if registerRequest.Followup != "" {
|
||||
if regReq.Followup != "" {
|
||||
select {
|
||||
case <-req.Context().Done():
|
||||
return
|
||||
|
@ -223,7 +223,7 @@ func (h *Headscale) handleRegister(
|
|||
}
|
||||
|
||||
// The node has expired or it is logged out
|
||||
h.handleNodeExpiredOrLoggedOut(writer, registerRequest, *node, machineKey)
|
||||
h.handleNodeExpiredOrLoggedOut(writer, regReq, *node, machineKey)
|
||||
|
||||
// TODO(juan): RegisterRequest includes an Expiry time, that we could optionally use
|
||||
node.Expiry = &time.Time{}
|
||||
|
@ -232,7 +232,7 @@ func (h *Headscale) handleRegister(
|
|||
// we need to make sure the NodeKey matches the one in the request
|
||||
// TODO(juan): What happens when using fast user switching between two
|
||||
// headscale-managed tailnets?
|
||||
node.NodeKey = registerRequest.NodeKey
|
||||
node.NodeKey = regReq.NodeKey
|
||||
h.registrationCache.Set(
|
||||
machineKey.String(),
|
||||
*node,
|
||||
|
@ -273,8 +273,6 @@ func (h *Headscale) handleAuthKey(
|
|||
Err(err).
|
||||
Msg("Cannot encode message")
|
||||
http.Error(writer, "Internal server error", http.StatusInternalServerError)
|
||||
nodeRegistrations.WithLabelValues("new", util.RegisterMethodAuthKey, "error", pak.User.Name).
|
||||
Inc()
|
||||
|
||||
return
|
||||
}
|
||||
|
@ -294,13 +292,6 @@ func (h *Headscale) handleAuthKey(
|
|||
Str("node", registerRequest.Hostinfo.Hostname).
|
||||
Msg("Failed authentication via AuthKey")
|
||||
|
||||
if pak != nil {
|
||||
nodeRegistrations.WithLabelValues("new", util.RegisterMethodAuthKey, "error", pak.User.Name).
|
||||
Inc()
|
||||
} else {
|
||||
nodeRegistrations.WithLabelValues("new", util.RegisterMethodAuthKey, "error", "unknown").Inc()
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -323,14 +314,21 @@ func (h *Headscale) handleAuthKey(
|
|||
Msg("node was already registered before, refreshing with new auth key")
|
||||
|
||||
node.NodeKey = nodeKey
|
||||
node.AuthKeyID = uint(pak.ID)
|
||||
err := h.db.NodeSetExpiry(node.ID, registerRequest.Expiry)
|
||||
pakID := uint(pak.ID)
|
||||
if pakID != 0 {
|
||||
node.AuthKeyID = &pakID
|
||||
}
|
||||
|
||||
node.Expiry = ®isterRequest.Expiry
|
||||
node.User = pak.User
|
||||
node.UserID = pak.UserID
|
||||
err := h.db.DB.Save(node).Error
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("node", node.Hostname).
|
||||
Err(err).
|
||||
Msg("Failed to refresh node")
|
||||
Msg("failed to save node after logging in with auth key")
|
||||
|
||||
return
|
||||
}
|
||||
|
@ -352,13 +350,8 @@ func (h *Headscale) handleAuthKey(
|
|||
}
|
||||
}
|
||||
|
||||
mkey := node.MachineKey
|
||||
update := types.StateUpdateExpire(node.ID, registerRequest.Expiry)
|
||||
|
||||
if update.Valid() {
|
||||
ctx := types.NotifyCtx(context.Background(), "handle-authkey", "na")
|
||||
h.nodeNotifier.NotifyWithIgnore(ctx, update, mkey.String())
|
||||
}
|
||||
ctx := types.NotifyCtx(context.Background(), "handle-authkey", "na")
|
||||
h.nodeNotifier.NotifyAll(ctx, types.StateUpdate{Type: types.StatePeerChanged, ChangeNodes: []types.NodeID{node.ID}})
|
||||
} else {
|
||||
now := time.Now().UTC()
|
||||
|
||||
|
@ -384,11 +377,10 @@ func (h *Headscale) handleAuthKey(
|
|||
Expiry: ®isterRequest.Expiry,
|
||||
NodeKey: nodeKey,
|
||||
LastSeen: &now,
|
||||
AuthKeyID: uint(pak.ID),
|
||||
ForcedTags: pak.Proto().GetAclTags(),
|
||||
}
|
||||
|
||||
addrs, err := h.ipAlloc.Next()
|
||||
ipv4, ipv6, err := h.ipAlloc.Next()
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
|
@ -400,24 +392,26 @@ func (h *Headscale) handleAuthKey(
|
|||
return
|
||||
}
|
||||
|
||||
pakID := uint(pak.ID)
|
||||
if pakID != 0 {
|
||||
nodeToRegister.AuthKeyID = &pakID
|
||||
}
|
||||
node, err = h.db.RegisterNode(
|
||||
nodeToRegister,
|
||||
addrs,
|
||||
ipv4, ipv6,
|
||||
)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("could not register node")
|
||||
nodeRegistrations.WithLabelValues("new", util.RegisterMethodAuthKey, "error", pak.User.Name).
|
||||
Inc()
|
||||
http.Error(writer, "Internal server error", http.StatusInternalServerError)
|
||||
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
err = h.db.DB.Transaction(func(tx *gorm.DB) error {
|
||||
h.db.Write(func(tx *gorm.DB) error {
|
||||
return db.UsePreAuthKey(tx, pak)
|
||||
})
|
||||
if err != nil {
|
||||
|
@ -425,8 +419,6 @@ func (h *Headscale) handleAuthKey(
|
|||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to use pre-auth key")
|
||||
nodeRegistrations.WithLabelValues("new", util.RegisterMethodAuthKey, "error", pak.User.Name).
|
||||
Inc()
|
||||
http.Error(writer, "Internal server error", http.StatusInternalServerError)
|
||||
|
||||
return
|
||||
|
@ -445,14 +437,10 @@ func (h *Headscale) handleAuthKey(
|
|||
Str("node", registerRequest.Hostinfo.Hostname).
|
||||
Err(err).
|
||||
Msg("Cannot encode message")
|
||||
nodeRegistrations.WithLabelValues("new", util.RegisterMethodAuthKey, "error", pak.User.Name).
|
||||
Inc()
|
||||
http.Error(writer, "Internal server error", http.StatusInternalServerError)
|
||||
|
||||
return
|
||||
}
|
||||
nodeRegistrations.WithLabelValues("new", util.RegisterMethodAuthKey, "success", pak.User.Name).
|
||||
Inc()
|
||||
writer.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
writer.WriteHeader(http.StatusOK)
|
||||
_, err = writer.Write(respBody)
|
||||
|
@ -466,7 +454,6 @@ func (h *Headscale) handleAuthKey(
|
|||
|
||||
log.Info().
|
||||
Str("node", registerRequest.Hostinfo.Hostname).
|
||||
Str("ips", strings.Join(node.IPAddresses.StringSlice(), ", ")).
|
||||
Msg("Successfully authenticated via AuthKey")
|
||||
}
|
||||
|
||||
|
@ -538,11 +525,8 @@ func (h *Headscale) handleNodeLogOut(
|
|||
return
|
||||
}
|
||||
|
||||
stateUpdate := types.StateUpdateExpire(node.ID, now)
|
||||
if stateUpdate.Valid() {
|
||||
ctx := types.NotifyCtx(context.Background(), "logout-expiry", "na")
|
||||
h.nodeNotifier.NotifyWithIgnore(ctx, stateUpdate, node.MachineKey.String())
|
||||
}
|
||||
ctx := types.NotifyCtx(context.Background(), "logout-expiry", "na")
|
||||
h.nodeNotifier.NotifyWithIgnore(ctx, types.StateUpdateExpire(node.ID, now), node.ID)
|
||||
|
||||
resp.AuthURL = ""
|
||||
resp.MachineAuthorized = false
|
||||
|
@ -572,7 +556,7 @@ func (h *Headscale) handleNodeLogOut(
|
|||
}
|
||||
|
||||
if node.IsEphemeral() {
|
||||
err = h.db.DeleteNode(&node, h.nodeNotifier.ConnectedMap())
|
||||
changedNodes, err := h.db.DeleteNode(&node, h.nodeNotifier.LikelyConnectedMap())
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Err(err).
|
||||
|
@ -580,13 +564,16 @@ func (h *Headscale) handleNodeLogOut(
|
|||
Msg("Cannot delete ephemeral node from the database")
|
||||
}
|
||||
|
||||
stateUpdate := types.StateUpdate{
|
||||
ctx := types.NotifyCtx(context.Background(), "logout-ephemeral", "na")
|
||||
h.nodeNotifier.NotifyAll(ctx, types.StateUpdate{
|
||||
Type: types.StatePeerRemoved,
|
||||
Removed: []tailcfg.NodeID{tailcfg.NodeID(node.ID)},
|
||||
}
|
||||
if stateUpdate.Valid() {
|
||||
ctx := types.NotifyCtx(context.Background(), "logout-ephemeral", "na")
|
||||
h.nodeNotifier.NotifyAll(ctx, stateUpdate)
|
||||
Removed: []types.NodeID{node.ID},
|
||||
})
|
||||
if changedNodes != nil {
|
||||
h.nodeNotifier.NotifyAll(ctx, types.StateUpdate{
|
||||
Type: types.StatePeerChanged,
|
||||
ChangeNodes: changedNodes,
|
||||
})
|
||||
}
|
||||
|
||||
return
|
||||
|
@ -622,14 +609,10 @@ func (h *Headscale) handleNodeWithValidRegistration(
|
|||
Caller().
|
||||
Err(err).
|
||||
Msg("Cannot encode message")
|
||||
nodeRegistrations.WithLabelValues("update", "web", "error", node.User.Name).
|
||||
Inc()
|
||||
http.Error(writer, "Internal server error", http.StatusInternalServerError)
|
||||
|
||||
return
|
||||
}
|
||||
nodeRegistrations.WithLabelValues("update", "web", "success", node.User.Name).
|
||||
Inc()
|
||||
|
||||
writer.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
writer.WriteHeader(http.StatusOK)
|
||||
|
@ -660,7 +643,7 @@ func (h *Headscale) handleNodeKeyRefresh(
|
|||
Str("node", node.Hostname).
|
||||
Msg("We have the OldNodeKey in the database. This is a key refresh")
|
||||
|
||||
err := h.db.DB.Transaction(func(tx *gorm.DB) error {
|
||||
err := h.db.Write(func(tx *gorm.DB) error {
|
||||
return db.NodeSetNodeKey(tx, &node, registerRequest.NodeKey)
|
||||
})
|
||||
if err != nil {
|
||||
|
@ -706,14 +689,14 @@ func (h *Headscale) handleNodeKeyRefresh(
|
|||
|
||||
func (h *Headscale) handleNodeExpiredOrLoggedOut(
|
||||
writer http.ResponseWriter,
|
||||
registerRequest tailcfg.RegisterRequest,
|
||||
regReq tailcfg.RegisterRequest,
|
||||
node types.Node,
|
||||
machineKey key.MachinePublic,
|
||||
) {
|
||||
resp := tailcfg.RegisterResponse{}
|
||||
|
||||
if registerRequest.Auth.AuthKey != "" {
|
||||
h.handleAuthKey(writer, registerRequest, machineKey)
|
||||
if regReq.Auth != nil && regReq.Auth.AuthKey != "" {
|
||||
h.handleAuthKey(writer, regReq, machineKey)
|
||||
|
||||
return
|
||||
}
|
||||
|
@ -723,8 +706,8 @@ func (h *Headscale) handleNodeExpiredOrLoggedOut(
|
|||
Caller().
|
||||
Str("node", node.Hostname).
|
||||
Str("machine_key", machineKey.ShortString()).
|
||||
Str("node_key", registerRequest.NodeKey.ShortString()).
|
||||
Str("node_key_old", registerRequest.OldNodeKey.ShortString()).
|
||||
Str("node_key", regReq.NodeKey.ShortString()).
|
||||
Str("node_key_old", regReq.OldNodeKey.ShortString()).
|
||||
Msg("Node registration has expired or logged out. Sending a auth url to register")
|
||||
|
||||
if h.oauth2Config != nil {
|
||||
|
@ -743,14 +726,10 @@ func (h *Headscale) handleNodeExpiredOrLoggedOut(
|
|||
Caller().
|
||||
Err(err).
|
||||
Msg("Cannot encode message")
|
||||
nodeRegistrations.WithLabelValues("reauth", "web", "error", node.User.Name).
|
||||
Inc()
|
||||
http.Error(writer, "Internal server error", http.StatusInternalServerError)
|
||||
|
||||
return
|
||||
}
|
||||
nodeRegistrations.WithLabelValues("reauth", "web", "success", node.User.Name).
|
||||
Inc()
|
||||
|
||||
writer.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
writer.WriteHeader(http.StatusOK)
|
||||
|
@ -765,8 +744,8 @@ func (h *Headscale) handleNodeExpiredOrLoggedOut(
|
|||
log.Trace().
|
||||
Caller().
|
||||
Str("machine_key", machineKey.ShortString()).
|
||||
Str("node_key", registerRequest.NodeKey.ShortString()).
|
||||
Str("node_key_old", registerRequest.OldNodeKey.ShortString()).
|
||||
Str("node_key", regReq.NodeKey.ShortString()).
|
||||
Str("node_key_old", regReq.OldNodeKey.ShortString()).
|
||||
Str("node", node.Hostname).
|
||||
Msg("Node logged out. Sent AuthURL for reauthentication")
|
||||
}
|
||||
|
|
|
@ -33,7 +33,6 @@ func (ns *noiseServer) NoiseRegistrationHandler(
|
|||
Caller().
|
||||
Err(err).
|
||||
Msg("Cannot parse RegisterRequest")
|
||||
nodeRegistrations.WithLabelValues("unknown", "web", "error", "unknown").Inc()
|
||||
http.Error(writer, "Internal error", http.StatusInternalServerError)
|
||||
|
||||
return
|
||||
|
|
|
@ -5,6 +5,7 @@ import (
|
|||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/netip"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
@ -90,7 +91,8 @@ func NewHeadscaleDatabase(
|
|||
_ = tx.Migrator().
|
||||
RenameColumn(&types.Node{}, "nickname", "given_name")
|
||||
|
||||
// If the Node table has a column for registered,
|
||||
dbConn.Model(&types.Node{}).Where("auth_key_id = ?", 0).Update("auth_key_id", nil)
|
||||
// If the Node table has a column for registered,
|
||||
// find all occourences of "false" and drop them. Then
|
||||
// remove the column.
|
||||
if tx.Migrator().HasColumn(&types.Node{}, "registered") {
|
||||
|
@ -330,6 +332,75 @@ func NewHeadscaleDatabase(
				return nil
			},
		},
		{
			// Replace column with IP address list with dedicated
			// IP v4 and v6 column.
			// Note that previously, the list _could_ contain more
			// than two addresses, which should not really happen.
			// In that case, the first occurence of each type will
			// be kept.
			ID: "2024041121742",
			Migrate: func(tx *gorm.DB) error {
				_ = tx.Migrator().AddColumn(&types.Node{}, "ipv4")
				_ = tx.Migrator().AddColumn(&types.Node{}, "ipv6")

				type node struct {
					ID        uint64 `gorm:"column:id"`
					Addresses string `gorm:"column:ip_addresses"`
				}

				var nodes []node

				_ = tx.Raw("SELECT id, ip_addresses FROM nodes").Scan(&nodes).Error

				for _, node := range nodes {
					addrs := strings.Split(node.Addresses, ",")

					if len(addrs) == 0 {
						return fmt.Errorf("no addresses found for node(%d)", node.ID)
					}

					var v4 *netip.Addr
					var v6 *netip.Addr

					for _, addrStr := range addrs {
						addr, err := netip.ParseAddr(addrStr)
						if err != nil {
							return fmt.Errorf("parsing IP for node(%d) from database: %w", node.ID, err)
						}

						if addr.Is4() && v4 == nil {
							v4 = &addr
						}

						if addr.Is6() && v6 == nil {
							v6 = &addr
						}
					}

					if v4 != nil {
						err = tx.Model(&types.Node{}).Where("id = ?", node.ID).Update("ipv4", v4.String()).Error
						if err != nil {
							return fmt.Errorf("saving ip addresses to new columns: %w", err)
						}
					}

					if v6 != nil {
						err = tx.Model(&types.Node{}).Where("id = ?", node.ID).Update("ipv6", v6.String()).Error
						if err != nil {
							return fmt.Errorf("saving ip addresses to new columns: %w", err)
						}
					}
				}

				_ = tx.Migrator().DropColumn(&types.Node{}, "ip_addresses")

				return nil
			},
			Rollback: func(tx *gorm.DB) error {
				return nil
			},
		},
	},
)
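
The migration above keeps only the first address of each family when a legacy comma-separated ip_addresses value carries extras. A minimal standalone sketch of that same rule, outside GORM; the helper name splitFirstV4V6 and the main wrapper are illustrative and not part of headscale.

package main

import (
	"fmt"
	"net/netip"
	"strings"
)

// splitFirstV4V6 applies the migration's rule: from the old
// comma-separated ip_addresses value, keep the first IPv4 and the
// first IPv6 address and ignore any extras.
func splitFirstV4V6(csv string) (v4, v6 *netip.Addr, err error) {
	for _, s := range strings.Split(csv, ",") {
		addr, err := netip.ParseAddr(strings.TrimSpace(s))
		if err != nil {
			return nil, nil, fmt.Errorf("parsing %q: %w", s, err)
		}
		if addr.Is4() && v4 == nil {
			v4 = &addr
		}
		if addr.Is6() && v6 == nil {
			v6 = &addr
		}
	}

	return v4, v6, nil
}

func main() {
	// A legacy row that (incorrectly) carried three addresses.
	v4, v6, _ := splitFirstV4V6("100.64.0.1,fd7a:115c:a1e0::1,100.64.0.9")
	fmt.Println(v4, v6) // 100.64.0.1 fd7a:115c:a1e0::1
}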
|
||||
|
||||
|
@ -371,8 +442,7 @@ func openDB(cfg types.DatabaseConfig) (*gorm.DB, error) {
|
|||
db, err := gorm.Open(
|
||||
sqlite.Open(cfg.Sqlite.Path+"?_synchronous=1&_journal_mode=WAL"),
|
||||
&gorm.Config{
|
||||
DisableForeignKeyConstraintWhenMigrating: true,
|
||||
Logger: dbLogger,
|
||||
Logger: dbLogger,
|
||||
},
|
||||
)
|
||||
|
||||
|
@ -418,8 +488,7 @@ func openDB(cfg types.DatabaseConfig) (*gorm.DB, error) {
|
|||
}
|
||||
|
||||
db, err := gorm.Open(postgres.Open(dbString), &gorm.Config{
|
||||
DisableForeignKeyConstraintWhenMigrating: true,
|
||||
Logger: dbLogger,
|
||||
Logger: dbLogger,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
|
@ -1,13 +1,17 @@
package db

import (
	"crypto/rand"
	"database/sql"
	"errors"
	"fmt"
	"math/big"
	"net/netip"
	"sync"

	"github.com/juanfont/headscale/hscontrol/types"
	"github.com/juanfont/headscale/hscontrol/util"
	"github.com/rs/zerolog/log"
	"go4.org/netipx"
	"gorm.io/gorm"
)
@ -20,13 +24,16 @@ import (
type IPAllocator struct {
	mu sync.Mutex

	prefix4 netip.Prefix
	prefix6 netip.Prefix
	prefix4 *netip.Prefix
	prefix6 *netip.Prefix

	// Previous IPs handed out
	prev4 netip.Addr
	prev6 netip.Addr

	// strategy used for handing out IP addresses.
	strategy types.IPAllocationStrategy

	// Set of all IPs handed out.
	// This might not be in sync with the database,
	// but it is more conservative. If saves to the
@ -40,40 +47,71 @@ type IPAllocator struct {
// provided IPv4 and IPv6 prefix. It needs to be created
// when headscale starts and needs to finish its read
// transaction before any writes to the database occur.
func NewIPAllocator(db *HSDatabase, prefix4, prefix6 netip.Prefix) (*IPAllocator, error) {
	var addressesSlices []string
func NewIPAllocator(
	db *HSDatabase,
	prefix4, prefix6 *netip.Prefix,
	strategy types.IPAllocationStrategy,
) (*IPAllocator, error) {
	ret := IPAllocator{
		prefix4: prefix4,
		prefix6: prefix6,

		strategy: strategy,
	}

	var v4s []sql.NullString
	var v6s []sql.NullString

	if db != nil {
		db.Read(func(rx *gorm.DB) error {
			return rx.Model(&types.Node{}).Pluck("ip_addresses", &addressesSlices).Error
		err := db.Read(func(rx *gorm.DB) error {
			return rx.Model(&types.Node{}).Pluck("ipv4", &v4s).Error
		})
		if err != nil {
			return nil, fmt.Errorf("reading IPv4 addresses from database: %w", err)
		}

		err = db.Read(func(rx *gorm.DB) error {
			return rx.Model(&types.Node{}).Pluck("ipv6", &v6s).Error
		})
		if err != nil {
			return nil, fmt.Errorf("reading IPv6 addresses from database: %w", err)
		}

	}

	var ips netipx.IPSetBuilder

	// Add network and broadcast addrs to used pool so they
	// are not handed out to nodes.
	network4, broadcast4 := util.GetIPPrefixEndpoints(prefix4)
	network6, broadcast6 := util.GetIPPrefixEndpoints(prefix6)
	ips.Add(network4)
	ips.Add(broadcast4)
	ips.Add(network6)
	ips.Add(broadcast6)
	if prefix4 != nil {
		network4, broadcast4 := util.GetIPPrefixEndpoints(*prefix4)
		ips.Add(network4)
		ips.Add(broadcast4)

		// Use network as starting point, it will be used to call .Next()
		// TODO(kradalby): Could potentially take all the IPs loaded from
		// the database into account to start at a more "educated" location.
		ret.prev4 = network4
	}

	if prefix6 != nil {
		network6, broadcast6 := util.GetIPPrefixEndpoints(*prefix6)
		ips.Add(network6)
		ips.Add(broadcast6)

		ret.prev6 = network6
	}

	// Fetch all the IP Addresses currently handed out from the Database
	// and add them to the used IP set.
	for _, slice := range addressesSlices {
		var machineAddresses types.NodeAddresses
		err := machineAddresses.Scan(slice)
		if err != nil {
			return nil, fmt.Errorf(
				"parsing IPs from database %v: %w", machineAddresses,
				err,
			)
		}
	for _, addrStr := range append(v4s, v6s...) {
		if addrStr.Valid {
			addr, err := netip.ParseAddr(addrStr.String)
			if err != nil {
				return nil, fmt.Errorf("parsing IP address from database: %w", err)
			}

		for _, ip := range machineAddresses {
			ips.Add(ip)
			ips.Add(addr)
		}
	}
@ -86,42 +124,61 @@ func NewIPAllocator(db *HSDatabase, prefix4, prefix6 netip.Prefix) (*IPAllocator
		)
	}

	return &IPAllocator{
		usedIPs: ips,
	ret.usedIPs = ips

		prefix4: prefix4,
		prefix6: prefix6,

		// Use network as starting point, it will be used to call .Next()
		// TODO(kradalby): Could potentially take all the IPs loaded from
		// the database into account to start at a more "educated" location.
		prev4: network4,
		prev6: network6,
	}, nil
	return &ret, nil
}

func (i *IPAllocator) Next() (types.NodeAddresses, error) {
func (i *IPAllocator) Next() (*netip.Addr, *netip.Addr, error) {
	i.mu.Lock()
	defer i.mu.Unlock()

	v4, err := i.next(i.prev4, i.prefix4)
	if err != nil {
		return nil, fmt.Errorf("allocating IPv4 address: %w", err)
	var err error
	var ret4 *netip.Addr
	var ret6 *netip.Addr

	if i.prefix4 != nil {
		ret4, err = i.next(i.prev4, i.prefix4)
		if err != nil {
			return nil, nil, fmt.Errorf("allocating IPv4 address: %w", err)
		}
		i.prev4 = *ret4
	}

	v6, err := i.next(i.prev6, i.prefix6)
	if err != nil {
		return nil, fmt.Errorf("allocating IPv6 address: %w", err)
	if i.prefix6 != nil {
		ret6, err = i.next(i.prev6, i.prefix6)
		if err != nil {
			return nil, nil, fmt.Errorf("allocating IPv6 address: %w", err)
		}
		i.prev6 = *ret6
	}

	return types.NodeAddresses{*v4, *v6}, nil
	return ret4, ret6, nil
}
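
A hedged usage sketch of the new constructor and Next signature: both prefixes are now optional pointers, so an IPv4-only deployment passes nil for IPv6 and gets a nil IPv6 address back. The wrapper function allocateIPv4Only and the prefix value are illustrative; NewIPAllocator, Next and types.IPAllocationStrategySequential come from the code above, and 100.64.0.1 as the first handout matches the tests further down.

// allocateIPv4Only sketches calling the allocator with no database,
// an IPv4 prefix only, and the sequential strategy.
func allocateIPv4Only() error {
	pfx4 := netip.MustParsePrefix("100.64.0.0/10")

	alloc, err := NewIPAllocator(nil, &pfx4, nil, types.IPAllocationStrategySequential)
	if err != nil {
		return fmt.Errorf("creating allocator: %w", err)
	}

	v4, v6, err := alloc.Next()
	if err != nil {
		return fmt.Errorf("allocating: %w", err)
	}

	// v6 is nil because no IPv6 prefix was configured.
	if v6 == nil {
		fmt.Println("allocated", v4) // allocated 100.64.0.1
	}

	return nil
}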

var ErrCouldNotAllocateIP = errors.New("failed to allocate IP")

func (i *IPAllocator) next(prev netip.Addr, prefix netip.Prefix) (*netip.Addr, error) {
	// Get the first IP in our prefix
	ip := prev.Next()
func (i *IPAllocator) nextLocked(prev netip.Addr, prefix *netip.Prefix) (*netip.Addr, error) {
	i.mu.Lock()
	defer i.mu.Unlock()

	return i.next(prev, prefix)
}

func (i *IPAllocator) next(prev netip.Addr, prefix *netip.Prefix) (*netip.Addr, error) {
	var err error
	var ip netip.Addr

	switch i.strategy {
	case types.IPAllocationStrategySequential:
		// Get the first IP in our prefix
		ip = prev.Next()
	case types.IPAllocationStrategyRandom:
		ip, err = randomNext(*prefix)
		if err != nil {
			return nil, fmt.Errorf("getting random IP: %w", err)
		}
	}

	// TODO(kradalby): maybe this can be done less often.
	set, err := i.usedIPs.IPSet()
@ -136,7 +193,15 @@ func (i *IPAllocator) next(prev netip.Addr, prefix netip.Prefix) (*netip.Addr, e

		// Check if the IP has already been allocated.
		if set.Contains(ip) {
			ip = ip.Next()
			switch i.strategy {
			case types.IPAllocationStrategySequential:
				ip = ip.Next()
			case types.IPAllocationStrategyRandom:
				ip, err = randomNext(*prefix)
				if err != nil {
					return nil, fmt.Errorf("getting random IP: %w", err)
				}
			}

			continue
		}
@ -146,3 +211,120 @@ func (i *IPAllocator) next(prev netip.Addr, prefix netip.Prefix) (*netip.Addr, e
		return &ip, nil
	}
}

func randomNext(pfx netip.Prefix) (netip.Addr, error) {
	rang := netipx.RangeOfPrefix(pfx)
	fromIP, toIP := rang.From(), rang.To()

	var from, to big.Int

	from.SetBytes(fromIP.AsSlice())
	to.SetBytes(toIP.AsSlice())

	// Find the max, this is how we can do "random range",
	// get the "max" as 0 -> to - from and then add back from
	// after.
	tempMax := big.NewInt(0).Sub(&to, &from)

	out, err := rand.Int(rand.Reader, tempMax)
	if err != nil {
		return netip.Addr{}, fmt.Errorf("generating random IP: %w", err)
	}

	valInRange := big.NewInt(0).Add(&from, out)

	ip, ok := netip.AddrFromSlice(valInRange.Bytes())
	if !ok {
		return netip.Addr{}, fmt.Errorf("generated ip bytes are invalid ip")
	}

	if !pfx.Contains(ip) {
		return netip.Addr{}, fmt.Errorf(
			"generated ip(%s) not in prefix(%s)",
			ip.String(),
			pfx.String(),
		)
	}

	return ip, nil
}
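
randomNext draws a uniform offset in [0, to-from) with crypto/rand and adds it back to the range start. A self-contained sketch of the same big.Int technique reduced to plain byte values so the arithmetic is visible; randomInRange and the main wrapper are illustrative names, not part of headscale.

package main

import (
	"crypto/rand"
	"fmt"
	"math/big"
)

// randomInRange returns a uniform value in [from, to) using the same
// shift-to-zero trick as randomNext: draw r in [0, to-from), then add from.
func randomInRange(from, to *big.Int) (*big.Int, error) {
	span := new(big.Int).Sub(to, from) // size of the range
	r, err := rand.Int(rand.Reader, span)
	if err != nil {
		return nil, fmt.Errorf("reading randomness: %w", err)
	}

	return new(big.Int).Add(from, r), nil
}

func main() {
	// The IPv4 range 100.64.0.0 .. 100.127.255.255 as integers.
	from := new(big.Int).SetBytes([]byte{100, 64, 0, 0})
	to := new(big.Int).SetBytes([]byte{100, 127, 255, 255})

	v, _ := randomInRange(from, to)
	fmt.Println(v.Bytes()) // four bytes of a random address inside the prefix
}

Note that big.Int.Bytes() drops leading zero bytes, which is presumably why randomNext re-validates the slice via AddrFromSlice's ok result and the final pfx.Contains check before returning the address.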

// BackfillNodeIPs will take a database transaction, and
// iterate through all of the current nodes in headscale
// and ensure it has IP addresses according to the current
// configuration.
// This means that if both IPv4 and IPv6 is set in the
// config, and some nodes are missing that type of IP,
// it will be added.
// If a prefix type has been removed (IPv4 or IPv6), it
// will remove the IPs in that family from the node.
func (db *HSDatabase) BackfillNodeIPs(i *IPAllocator) ([]string, error) {
	var err error
	var ret []string
	err = db.Write(func(tx *gorm.DB) error {
		if i == nil {
			return errors.New("backfilling IPs: ip allocator was nil")
		}

		log.Trace().Msgf("starting to backfill IPs")

		nodes, err := ListNodes(tx)
		if err != nil {
			return fmt.Errorf("listing nodes to backfill IPs: %w", err)
		}

		for _, node := range nodes {
			log.Trace().Uint64("node.id", node.ID.Uint64()).Msg("checking if need backfill")

			changed := false
			// IPv4 prefix is set, but node ip is missing, alloc
			if i.prefix4 != nil && node.IPv4 == nil {
				ret4, err := i.nextLocked(i.prev4, i.prefix4)
				if err != nil {
					return fmt.Errorf("failed to allocate ipv4 for node(%d): %w", node.ID, err)
				}

				node.IPv4 = ret4
				changed = true
				ret = append(ret, fmt.Sprintf("assigned IPv4 %q to Node(%d) %q", ret4.String(), node.ID, node.Hostname))
			}

			// IPv6 prefix is set, but node ip is missing, alloc
			if i.prefix6 != nil && node.IPv6 == nil {
				ret6, err := i.nextLocked(i.prev6, i.prefix6)
				if err != nil {
					return fmt.Errorf("failed to allocate ipv6 for node(%d): %w", node.ID, err)
				}

				node.IPv6 = ret6
				changed = true
				ret = append(ret, fmt.Sprintf("assigned IPv6 %q to Node(%d) %q", ret6.String(), node.ID, node.Hostname))
			}

			// IPv4 prefix is not set, but node has IP, remove
			if i.prefix4 == nil && node.IPv4 != nil {
				ret = append(ret, fmt.Sprintf("removing IPv4 %q from Node(%d) %q", node.IPv4.String(), node.ID, node.Hostname))
				node.IPv4 = nil
				changed = true
			}

			// IPv6 prefix is not set, but node has IP, remove
			if i.prefix6 == nil && node.IPv6 != nil {
				ret = append(ret, fmt.Sprintf("removing IPv6 %q from Node(%d) %q", node.IPv6.String(), node.ID, node.Hostname))
				node.IPv6 = nil
				changed = true
			}

			if changed {
				err := tx.Save(node).Error
				if err != nil {
					return fmt.Errorf("saving node(%d) after adding IPs: %w", node.ID, err)
				}
			}
		}

		return nil
	})

	return ret, err
}
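
A hedged sketch of how a caller could drive BackfillNodeIPs at start-up, assuming it lives alongside ip.go in package db so the imports above apply. The wrapper name backfillIPs and its parameters are illustrative; only NewIPAllocator and BackfillNodeIPs come from the code above.

// Illustrative wiring only: build the allocator from the configured
// prefixes and let BackfillNodeIPs reconcile every node, logging the
// human-readable change list it returns.
func backfillIPs(d *HSDatabase, pfx4, pfx6 *netip.Prefix) error {
	alloc, err := NewIPAllocator(d, pfx4, pfx6, types.IPAllocationStrategySequential)
	if err != nil {
		return fmt.Errorf("creating IP allocator: %w", err)
	}

	changes, err := d.BackfillNodeIPs(alloc)
	if err != nil {
		return fmt.Errorf("backfilling node IPs: %w", err)
	}

	for _, change := range changes {
		log.Info().Msg(change)
	}

	return nil
}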
|
||||
|
|
|
@ -1,49 +1,41 @@
|
|||
package db
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"net/netip"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/google/go-cmp/cmp/cmpopts"
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/juanfont/headscale/hscontrol/util"
|
||||
)
|
||||
|
||||
func TestIPAllocator(t *testing.T) {
|
||||
mpp := func(pref string) netip.Prefix {
|
||||
return netip.MustParsePrefix(pref)
|
||||
}
|
||||
na := func(pref string) netip.Addr {
|
||||
return netip.MustParseAddr(pref)
|
||||
}
|
||||
newDb := func() *HSDatabase {
|
||||
tmpDir, err := os.MkdirTemp("", "headscale-db-test-*")
|
||||
if err != nil {
|
||||
t.Fatalf("creating temp dir: %s", err)
|
||||
}
|
||||
db, _ = NewHeadscaleDatabase(
|
||||
types.DatabaseConfig{
|
||||
Type: "sqlite3",
|
||||
Sqlite: types.SqliteConfig{
|
||||
Path: tmpDir + "/headscale_test.db",
|
||||
},
|
||||
},
|
||||
"",
|
||||
)
|
||||
|
||||
return db
|
||||
}
|
||||
var mpp = func(pref string) *netip.Prefix {
|
||||
p := netip.MustParsePrefix(pref)
|
||||
return &p
|
||||
}
|
||||
var na = func(pref string) netip.Addr {
|
||||
return netip.MustParseAddr(pref)
|
||||
}
|
||||
var nap = func(pref string) *netip.Addr {
|
||||
n := na(pref)
|
||||
return &n
|
||||
}
|
||||
|
||||
func TestIPAllocatorSequential(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
dbFunc func() *HSDatabase
|
||||
|
||||
prefix4 netip.Prefix
|
||||
prefix6 netip.Prefix
|
||||
prefix4 *netip.Prefix
|
||||
prefix6 *netip.Prefix
|
||||
getCount int
|
||||
want []types.NodeAddresses
|
||||
want4 []netip.Addr
|
||||
want6 []netip.Addr
|
||||
}{
|
||||
{
|
||||
name: "simple",
|
||||
|
@ -56,23 +48,52 @@ func TestIPAllocator(t *testing.T) {
|
|||
|
||||
getCount: 1,
|
||||
|
||||
want: []types.NodeAddresses{
|
||||
{
|
||||
na("100.64.0.1"),
|
||||
na("fd7a:115c:a1e0::1"),
|
||||
},
|
||||
want4: []netip.Addr{
|
||||
na("100.64.0.1"),
|
||||
},
|
||||
want6: []netip.Addr{
|
||||
na("fd7a:115c:a1e0::1"),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "simple-v4",
|
||||
dbFunc: func() *HSDatabase {
|
||||
return nil
|
||||
},
|
||||
|
||||
prefix4: mpp("100.64.0.0/10"),
|
||||
|
||||
getCount: 1,
|
||||
|
||||
want4: []netip.Addr{
|
||||
na("100.64.0.1"),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "simple-v6",
|
||||
dbFunc: func() *HSDatabase {
|
||||
return nil
|
||||
},
|
||||
|
||||
prefix6: mpp("fd7a:115c:a1e0::/48"),
|
||||
|
||||
getCount: 1,
|
||||
|
||||
want6: []netip.Addr{
|
||||
na("fd7a:115c:a1e0::1"),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "simple-with-db",
|
||||
dbFunc: func() *HSDatabase {
|
||||
db := newDb()
|
||||
db := dbForTest(t, "simple-with-db")
|
||||
user := types.User{Name: ""}
|
||||
db.DB.Save(&user)
|
||||
|
||||
db.DB.Save(&types.Node{
|
||||
IPAddresses: types.NodeAddresses{
|
||||
na("100.64.0.1"),
|
||||
na("fd7a:115c:a1e0::1"),
|
||||
},
|
||||
User: user,
|
||||
IPv4: nap("100.64.0.1"),
|
||||
IPv6: nap("fd7a:115c:a1e0::1"),
|
||||
})
|
||||
|
||||
return db
|
||||
|
@ -83,23 +104,24 @@ func TestIPAllocator(t *testing.T) {
|
|||
|
||||
getCount: 1,
|
||||
|
||||
want: []types.NodeAddresses{
|
||||
{
|
||||
na("100.64.0.2"),
|
||||
na("fd7a:115c:a1e0::2"),
|
||||
},
|
||||
want4: []netip.Addr{
|
||||
na("100.64.0.2"),
|
||||
},
|
||||
want6: []netip.Addr{
|
||||
na("fd7a:115c:a1e0::2"),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "before-after-free-middle-in-db",
|
||||
dbFunc: func() *HSDatabase {
|
||||
db := newDb()
|
||||
db := dbForTest(t, "before-after-free-middle-in-db")
|
||||
user := types.User{Name: ""}
|
||||
db.DB.Save(&user)
|
||||
|
||||
db.DB.Save(&types.Node{
|
||||
IPAddresses: types.NodeAddresses{
|
||||
na("100.64.0.2"),
|
||||
na("fd7a:115c:a1e0::2"),
|
||||
},
|
||||
User: user,
|
||||
IPv4: nap("100.64.0.2"),
|
||||
IPv6: nap("fd7a:115c:a1e0::2"),
|
||||
})
|
||||
|
||||
return db
|
||||
|
@ -110,15 +132,13 @@ func TestIPAllocator(t *testing.T) {
|
|||
|
||||
getCount: 2,
|
||||
|
||||
want: []types.NodeAddresses{
|
||||
{
|
||||
na("100.64.0.1"),
|
||||
na("fd7a:115c:a1e0::1"),
|
||||
},
|
||||
{
|
||||
na("100.64.0.3"),
|
||||
na("fd7a:115c:a1e0::3"),
|
||||
},
|
||||
want4: []netip.Addr{
|
||||
na("100.64.0.1"),
|
||||
na("100.64.0.3"),
|
||||
},
|
||||
want6: []netip.Addr{
|
||||
na("fd7a:115c:a1e0::1"),
|
||||
na("fd7a:115c:a1e0::3"),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
@ -127,24 +147,367 @@ func TestIPAllocator(t *testing.T) {
|
|||
t.Run(tt.name, func(t *testing.T) {
|
||||
db := tt.dbFunc()
|
||||
|
||||
alloc, _ := NewIPAllocator(db, tt.prefix4, tt.prefix6)
|
||||
alloc, _ := NewIPAllocator(
|
||||
db,
|
||||
tt.prefix4,
|
||||
tt.prefix6,
|
||||
types.IPAllocationStrategySequential,
|
||||
)
|
||||
|
||||
spew.Dump(alloc)
|
||||
|
||||
t.Logf("prefixes: %q, %q", tt.prefix4.String(), tt.prefix6.String())
|
||||
|
||||
var got []types.NodeAddresses
|
||||
var got4s []netip.Addr
|
||||
var got6s []netip.Addr
|
||||
|
||||
for range tt.getCount {
|
||||
gotSet, err := alloc.Next()
|
||||
got4, got6, err := alloc.Next()
|
||||
if err != nil {
|
||||
t.Fatalf("allocating next IP: %s", err)
|
||||
}
|
||||
|
||||
got = append(got, gotSet)
|
||||
if got4 != nil {
|
||||
got4s = append(got4s, *got4)
|
||||
}
|
||||
|
||||
if got6 != nil {
|
||||
got6s = append(got6s, *got6)
|
||||
}
|
||||
}
|
||||
if diff := cmp.Diff(tt.want, got, util.Comparers...); diff != "" {
|
||||
t.Errorf("IPAllocator unexpected result (-want +got):\n%s", diff)
|
||||
if diff := cmp.Diff(tt.want4, got4s, util.Comparers...); diff != "" {
|
||||
t.Errorf("IPAllocator 4s unexpected result (-want +got):\n%s", diff)
|
||||
}
|
||||
|
||||
if diff := cmp.Diff(tt.want6, got6s, util.Comparers...); diff != "" {
|
||||
t.Errorf("IPAllocator 6s unexpected result (-want +got):\n%s", diff)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestIPAllocatorRandom(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
dbFunc func() *HSDatabase
|
||||
|
||||
getCount int
|
||||
|
||||
prefix4 *netip.Prefix
|
||||
prefix6 *netip.Prefix
|
||||
want4 bool
|
||||
want6 bool
|
||||
}{
|
||||
{
|
||||
name: "simple",
|
||||
dbFunc: func() *HSDatabase {
|
||||
return nil
|
||||
},
|
||||
|
||||
prefix4: mpp("100.64.0.0/10"),
|
||||
prefix6: mpp("fd7a:115c:a1e0::/48"),
|
||||
|
||||
getCount: 1,
|
||||
|
||||
want4: true,
|
||||
want6: true,
|
||||
},
|
||||
{
|
||||
name: "simple-v4",
|
||||
dbFunc: func() *HSDatabase {
|
||||
return nil
|
||||
},
|
||||
|
||||
prefix4: mpp("100.64.0.0/10"),
|
||||
|
||||
getCount: 1,
|
||||
|
||||
want4: true,
|
||||
want6: false,
|
||||
},
|
||||
{
|
||||
name: "simple-v6",
|
||||
dbFunc: func() *HSDatabase {
|
||||
return nil
|
||||
},
|
||||
|
||||
prefix6: mpp("fd7a:115c:a1e0::/48"),
|
||||
|
||||
getCount: 1,
|
||||
|
||||
want4: false,
|
||||
want6: true,
|
||||
},
|
||||
{
|
||||
name: "generate-lots-of-random",
|
||||
dbFunc: func() *HSDatabase {
|
||||
return nil
|
||||
},
|
||||
|
||||
prefix4: mpp("100.64.0.0/10"),
|
||||
prefix6: mpp("fd7a:115c:a1e0::/48"),
|
||||
|
||||
getCount: 1000,
|
||||
|
||||
want4: true,
|
||||
want6: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
db := tt.dbFunc()
|
||||
|
||||
alloc, _ := NewIPAllocator(db, tt.prefix4, tt.prefix6, types.IPAllocationStrategyRandom)
|
||||
|
||||
spew.Dump(alloc)
|
||||
|
||||
for range tt.getCount {
|
||||
got4, got6, err := alloc.Next()
|
||||
if err != nil {
|
||||
t.Fatalf("allocating next IP: %s", err)
|
||||
}
|
||||
|
||||
t.Logf("addrs ipv4: %v, ipv6: %v", got4, got6)
|
||||
|
||||
if tt.want4 {
|
||||
if got4 == nil {
|
||||
t.Fatalf("expected ipv4 addr, got nil")
|
||||
}
|
||||
}
|
||||
|
||||
if tt.want6 {
|
||||
if got6 == nil {
|
||||
t.Fatalf("expected ipv4 addr, got nil")
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBackfillIPAddresses(t *testing.T) {
|
||||
fullNodeP := func(i int) *types.Node {
|
||||
v4 := fmt.Sprintf("100.64.0.%d", i)
|
||||
v6 := fmt.Sprintf("fd7a:115c:a1e0::%d", i)
|
||||
return &types.Node{
|
||||
IPv4DatabaseField: sql.NullString{
|
||||
Valid: true,
|
||||
String: v4,
|
||||
},
|
||||
IPv4: nap(v4),
|
||||
IPv6DatabaseField: sql.NullString{
|
||||
Valid: true,
|
||||
String: v6,
|
||||
},
|
||||
IPv6: nap(v6),
|
||||
}
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
dbFunc func() *HSDatabase
|
||||
|
||||
prefix4 *netip.Prefix
|
||||
prefix6 *netip.Prefix
|
||||
want types.Nodes
|
||||
}{
|
||||
{
|
||||
name: "simple-backfill-ipv6",
|
||||
dbFunc: func() *HSDatabase {
|
||||
db := dbForTest(t, "simple-backfill-ipv6")
|
||||
user := types.User{Name: ""}
|
||||
db.DB.Save(&user)
|
||||
|
||||
db.DB.Save(&types.Node{
|
||||
User: user,
|
||||
IPv4: nap("100.64.0.1"),
|
||||
})
|
||||
|
||||
return db
|
||||
},
|
||||
|
||||
prefix4: mpp("100.64.0.0/10"),
|
||||
prefix6: mpp("fd7a:115c:a1e0::/48"),
|
||||
|
||||
want: types.Nodes{
|
||||
&types.Node{
|
||||
IPv4DatabaseField: sql.NullString{
|
||||
Valid: true,
|
||||
String: "100.64.0.1",
|
||||
},
|
||||
IPv4: nap("100.64.0.1"),
|
||||
IPv6DatabaseField: sql.NullString{
|
||||
Valid: true,
|
||||
String: "fd7a:115c:a1e0::1",
|
||||
},
|
||||
IPv6: nap("fd7a:115c:a1e0::1"),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "simple-backfill-ipv4",
|
||||
dbFunc: func() *HSDatabase {
|
||||
db := dbForTest(t, "simple-backfill-ipv4")
|
||||
user := types.User{Name: ""}
|
||||
db.DB.Save(&user)
|
||||
|
||||
db.DB.Save(&types.Node{
|
||||
User: user,
|
||||
IPv6: nap("fd7a:115c:a1e0::1"),
|
||||
})
|
||||
|
||||
return db
|
||||
},
|
||||
|
||||
prefix4: mpp("100.64.0.0/10"),
|
||||
prefix6: mpp("fd7a:115c:a1e0::/48"),
|
||||
|
||||
want: types.Nodes{
|
||||
&types.Node{
|
||||
IPv4DatabaseField: sql.NullString{
|
||||
Valid: true,
|
||||
String: "100.64.0.1",
|
||||
},
|
||||
IPv4: nap("100.64.0.1"),
|
||||
IPv6DatabaseField: sql.NullString{
|
||||
Valid: true,
|
||||
String: "fd7a:115c:a1e0::1",
|
||||
},
|
||||
IPv6: nap("fd7a:115c:a1e0::1"),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "simple-backfill-remove-ipv6",
|
||||
dbFunc: func() *HSDatabase {
|
||||
db := dbForTest(t, "simple-backfill-remove-ipv6")
|
||||
user := types.User{Name: ""}
|
||||
db.DB.Save(&user)
|
||||
|
||||
db.DB.Save(&types.Node{
|
||||
User: user,
|
||||
IPv4: nap("100.64.0.1"),
|
||||
IPv6: nap("fd7a:115c:a1e0::1"),
|
||||
})
|
||||
|
||||
return db
|
||||
},
|
||||
|
||||
prefix4: mpp("100.64.0.0/10"),
|
||||
|
||||
want: types.Nodes{
|
||||
&types.Node{
|
||||
IPv4DatabaseField: sql.NullString{
|
||||
Valid: true,
|
||||
String: "100.64.0.1",
|
||||
},
|
||||
IPv4: nap("100.64.0.1"),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "simple-backfill-remove-ipv4",
|
||||
dbFunc: func() *HSDatabase {
|
||||
db := dbForTest(t, "simple-backfill-remove-ipv4")
|
||||
user := types.User{Name: ""}
|
||||
db.DB.Save(&user)
|
||||
|
||||
db.DB.Save(&types.Node{
|
||||
User: user,
|
||||
IPv4: nap("100.64.0.1"),
|
||||
IPv6: nap("fd7a:115c:a1e0::1"),
|
||||
})
|
||||
|
||||
return db
|
||||
},
|
||||
|
||||
prefix6: mpp("fd7a:115c:a1e0::/48"),
|
||||
|
||||
want: types.Nodes{
|
||||
&types.Node{
|
||||
IPv6DatabaseField: sql.NullString{
|
||||
Valid: true,
|
||||
String: "fd7a:115c:a1e0::1",
|
||||
},
|
||||
IPv6: nap("fd7a:115c:a1e0::1"),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "multi-backfill-ipv6",
|
||||
dbFunc: func() *HSDatabase {
|
||||
db := dbForTest(t, "simple-backfill-ipv6")
|
||||
user := types.User{Name: ""}
|
||||
db.DB.Save(&user)
|
||||
|
||||
db.DB.Save(&types.Node{
|
||||
User: user,
|
||||
IPv4: nap("100.64.0.1"),
|
||||
})
|
||||
db.DB.Save(&types.Node{
|
||||
User: user,
|
||||
IPv4: nap("100.64.0.2"),
|
||||
})
|
||||
db.DB.Save(&types.Node{
|
||||
User: user,
|
||||
IPv4: nap("100.64.0.3"),
|
||||
})
|
||||
db.DB.Save(&types.Node{
|
||||
User: user,
|
||||
IPv4: nap("100.64.0.4"),
|
||||
})
|
||||
|
||||
return db
|
||||
},
|
||||
|
||||
prefix4: mpp("100.64.0.0/10"),
|
||||
prefix6: mpp("fd7a:115c:a1e0::/48"),
|
||||
|
||||
want: types.Nodes{
|
||||
fullNodeP(1),
|
||||
fullNodeP(2),
|
||||
fullNodeP(3),
|
||||
fullNodeP(4),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
comps := append(util.Comparers, cmpopts.IgnoreFields(types.Node{},
|
||||
"ID",
|
||||
"MachineKeyDatabaseField",
|
||||
"NodeKeyDatabaseField",
|
||||
"DiscoKeyDatabaseField",
|
||||
"User",
|
||||
"UserID",
|
||||
"Endpoints",
|
||||
"HostinfoDatabaseField",
|
||||
"Hostinfo",
|
||||
"Routes",
|
||||
"CreatedAt",
|
||||
"UpdatedAt",
|
||||
))
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
db := tt.dbFunc()
|
||||
|
||||
alloc, err := NewIPAllocator(db, tt.prefix4, tt.prefix6, types.IPAllocationStrategySequential)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to set up ip alloc: %s", err)
|
||||
}
|
||||
|
||||
logs, err := db.BackfillNodeIPs(alloc)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to backfill: %s", err)
|
||||
}
|
||||
|
||||
t.Logf("backfill log: \n%s", strings.Join(logs, "\n"))
|
||||
|
||||
got, err := db.ListNodes()
|
||||
if err != nil {
|
||||
t.Fatalf("failed to get nodes: %s", err)
|
||||
}
|
||||
|
||||
if diff := cmp.Diff(tt.want, got, comps...); diff != "" {
|
||||
t.Errorf("Backfill unexpected result (-want +got):\n%s", diff)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
|
|
@ -5,12 +5,12 @@ import (
|
|||
"fmt"
|
||||
"net/netip"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/juanfont/headscale/hscontrol/util"
|
||||
"github.com/patrickmn/go-cache"
|
||||
"github.com/puzpuzpuz/xsync/v3"
|
||||
"github.com/rs/zerolog/log"
|
||||
"gorm.io/gorm"
|
||||
"tailscale.com/tailcfg"
|
||||
|
@ -34,27 +34,22 @@ var (
|
|||
)
|
||||
)
|
||||
|
||||
func (hsdb *HSDatabase) ListPeers(node *types.Node) (types.Nodes, error) {
|
||||
func (hsdb *HSDatabase) ListPeers(nodeID types.NodeID) (types.Nodes, error) {
|
||||
return Read(hsdb.DB, func(rx *gorm.DB) (types.Nodes, error) {
|
||||
return ListPeers(rx, node)
|
||||
return ListPeers(rx, nodeID)
|
||||
})
|
||||
}
|
||||
|
||||
// ListPeers returns all peers of node, regardless of any Policy or if the node is expired.
|
||||
func ListPeers(tx *gorm.DB, node *types.Node) (types.Nodes, error) {
|
||||
log.Trace().
|
||||
Caller().
|
||||
Str("node", node.Hostname).
|
||||
Msg("Finding direct peers")
|
||||
|
||||
func ListPeers(tx *gorm.DB, nodeID types.NodeID) (types.Nodes, error) {
|
||||
nodes := types.Nodes{}
|
||||
if err := tx.
|
||||
Preload("AuthKey").
|
||||
Preload("AuthKey.User").
|
||||
Preload("User").
|
||||
Preload("Routes").
|
||||
Where("node_key <> ?",
|
||||
node.NodeKey.String()).Find(&nodes).Error; err != nil {
|
||||
Where("id <> ?",
|
||||
nodeID).Find(&nodes).Error; err != nil {
|
||||
return types.Nodes{}, err
|
||||
}
|
||||
|
||||
|
@ -119,14 +114,14 @@ func getNode(tx *gorm.DB, user string, name string) (*types.Node, error) {
|
|||
return nil, ErrNodeNotFound
|
||||
}
|
||||
|
||||
func (hsdb *HSDatabase) GetNodeByID(id uint64) (*types.Node, error) {
|
||||
func (hsdb *HSDatabase) GetNodeByID(id types.NodeID) (*types.Node, error) {
|
||||
return Read(hsdb.DB, func(rx *gorm.DB) (*types.Node, error) {
|
||||
return GetNodeByID(rx, id)
|
||||
})
|
||||
}
|
||||
|
||||
// GetNodeByID finds a Node by ID and returns the Node struct.
|
||||
func GetNodeByID(tx *gorm.DB, id uint64) (*types.Node, error) {
|
||||
func GetNodeByID(tx *gorm.DB, id types.NodeID) (*types.Node, error) {
|
||||
mach := types.Node{}
|
||||
if result := tx.
|
||||
Preload("AuthKey").
|
||||
|
@ -197,7 +192,7 @@ func GetNodeByAnyKey(
|
|||
}
|
||||
|
||||
func (hsdb *HSDatabase) SetTags(
|
||||
nodeID uint64,
|
||||
nodeID types.NodeID,
|
||||
tags []string,
|
||||
) error {
|
||||
return hsdb.Write(func(tx *gorm.DB) error {
|
||||
|
@ -208,10 +203,15 @@ func (hsdb *HSDatabase) SetTags(
|
|||
// SetTags takes a Node struct pointer and update the forced tags.
|
||||
func SetTags(
|
||||
tx *gorm.DB,
|
||||
nodeID uint64,
|
||||
nodeID types.NodeID,
|
||||
tags []string,
|
||||
) error {
|
||||
if len(tags) == 0 {
|
||||
// if no tags are provided, we remove all forced tags
|
||||
if err := tx.Model(&types.Node{}).Where("id = ?", nodeID).Update("forced_tags", types.StringList{}).Error; err != nil {
|
||||
return fmt.Errorf("failed to remove tags for node in the database: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -238,15 +238,7 @@ func RenameNode(tx *gorm.DB,
|
|||
newName,
|
||||
)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("func", "RenameNode").
|
||||
Uint64("nodeID", nodeID).
|
||||
Str("newName", newName).
|
||||
Err(err).
|
||||
Msg("failed to rename node")
|
||||
|
||||
return err
|
||||
return fmt.Errorf("renaming node: %w", err)
|
||||
}
|
||||
|
||||
if err := tx.Model(&types.Node{}).Where("id = ?", nodeID).Update("given_name", newName).Error; err != nil {
|
||||
|
@ -256,7 +248,7 @@ func RenameNode(tx *gorm.DB,
|
|||
return nil
|
||||
}
|
||||
|
||||
func (hsdb *HSDatabase) NodeSetExpiry(nodeID uint64, expiry time.Time) error {
|
||||
func (hsdb *HSDatabase) NodeSetExpiry(nodeID types.NodeID, expiry time.Time) error {
|
||||
return hsdb.Write(func(tx *gorm.DB) error {
|
||||
return NodeSetExpiry(tx, nodeID, expiry)
|
||||
})
|
||||
|
@ -264,14 +256,14 @@ func (hsdb *HSDatabase) NodeSetExpiry(nodeID uint64, expiry time.Time) error {
|
|||
|
||||
// NodeSetExpiry takes a Node struct and a new expiry time.
|
||||
func NodeSetExpiry(tx *gorm.DB,
|
||||
nodeID uint64, expiry time.Time,
|
||||
nodeID types.NodeID, expiry time.Time,
|
||||
) error {
|
||||
return tx.Model(&types.Node{}).Where("id = ?", nodeID).Update("expiry", expiry).Error
|
||||
}
|
||||
|
||||
func (hsdb *HSDatabase) DeleteNode(node *types.Node, isConnected map[key.MachinePublic]bool) error {
|
||||
return hsdb.Write(func(tx *gorm.DB) error {
|
||||
return DeleteNode(tx, node, isConnected)
|
||||
func (hsdb *HSDatabase) DeleteNode(node *types.Node, isLikelyConnected *xsync.MapOf[types.NodeID, bool]) ([]types.NodeID, error) {
|
||||
return Write(hsdb.DB, func(tx *gorm.DB) ([]types.NodeID, error) {
|
||||
return DeleteNode(tx, node, isLikelyConnected)
|
||||
})
|
||||
}
|
||||
|
||||
|
@ -279,24 +271,24 @@ func (hsdb *HSDatabase) DeleteNode(node *types.Node, isConnected map[key.Machine
|
|||
// Caller is responsible for notifying all of change.
|
||||
func DeleteNode(tx *gorm.DB,
|
||||
node *types.Node,
|
||||
isConnected map[key.MachinePublic]bool,
|
||||
) error {
|
||||
err := deleteNodeRoutes(tx, node, map[key.MachinePublic]bool{})
|
||||
isLikelyConnected *xsync.MapOf[types.NodeID, bool],
|
||||
) ([]types.NodeID, error) {
|
||||
changed, err := deleteNodeRoutes(tx, node, isLikelyConnected)
|
||||
if err != nil {
|
||||
return err
|
||||
return changed, err
|
||||
}
|
||||
|
||||
// Unscoped causes the node to be fully removed from the database.
|
||||
if err := tx.Unscoped().Delete(&node).Error; err != nil {
|
||||
return err
|
||||
if err := tx.Unscoped().Delete(&types.Node{}, node.ID).Error; err != nil {
|
||||
return changed, err
|
||||
}
|
||||
|
||||
return nil
|
||||
return changed, nil
|
||||
}
|
||||
|
||||
// UpdateLastSeen sets a node's last seen field indicating that we
|
||||
// SetLastSeen sets a node's last seen field indicating that we
|
||||
// have recently communicating with this node.
|
||||
func UpdateLastSeen(tx *gorm.DB, nodeID uint64, lastSeen time.Time) error {
|
||||
func SetLastSeen(tx *gorm.DB, nodeID types.NodeID, lastSeen time.Time) error {
|
||||
return tx.Model(&types.Node{}).Where("id = ?", nodeID).Update("last_seen", lastSeen).Error
|
||||
}
|
||||
|
||||
|
@ -307,7 +299,8 @@ func RegisterNodeFromAuthCallback(
|
|||
userName string,
|
||||
nodeExpiry *time.Time,
|
||||
registrationMethod string,
|
||||
addrs types.NodeAddresses,
|
||||
ipv4 *netip.Addr,
|
||||
ipv6 *netip.Addr,
|
||||
) (*types.Node, error) {
|
||||
log.Debug().
|
||||
Str("machine_key", mkey.ShortString()).
|
||||
|
@ -343,7 +336,7 @@ func RegisterNodeFromAuthCallback(
|
|||
node, err := RegisterNode(
|
||||
tx,
|
||||
registrationNode,
|
||||
addrs,
|
||||
ipv4, ipv6,
|
||||
)
|
||||
|
||||
if err == nil {
|
||||
|
@ -359,14 +352,14 @@ func RegisterNodeFromAuthCallback(
|
|||
return nil, ErrNodeNotFoundRegistrationCache
|
||||
}
|
||||
|
||||
func (hsdb *HSDatabase) RegisterNode(node types.Node, addrs types.NodeAddresses) (*types.Node, error) {
|
||||
func (hsdb *HSDatabase) RegisterNode(node types.Node, ipv4 *netip.Addr, ipv6 *netip.Addr) (*types.Node, error) {
|
||||
return Write(hsdb.DB, func(tx *gorm.DB) (*types.Node, error) {
|
||||
return RegisterNode(tx, node, addrs)
|
||||
return RegisterNode(tx, node, ipv4, ipv6)
|
||||
})
|
||||
}
|
||||
|
||||
// RegisterNode is executed from the CLI to register a new Node using its MachineKey.
|
||||
func RegisterNode(tx *gorm.DB, node types.Node, addrs types.NodeAddresses) (*types.Node, error) {
|
||||
func RegisterNode(tx *gorm.DB, node types.Node, ipv4 *netip.Addr, ipv6 *netip.Addr) (*types.Node, error) {
|
||||
log.Debug().
|
||||
Str("node", node.Hostname).
|
||||
Str("machine_key", node.MachineKey.ShortString()).
|
||||
|
@ -374,10 +367,10 @@ func RegisterNode(tx *gorm.DB, node types.Node, addrs types.NodeAddresses) (*typ
|
|||
Str("user", node.User.Name).
|
||||
Msg("Registering node")
|
||||
|
||||
// If the node exists and we had already IPs for it, we just save it
|
||||
// If the node exists and it already has IP(s), we just save it
|
||||
// so we store the node.Expire and node.Nodekey that has been set when
|
||||
// adding it to the registrationCache
|
||||
if len(node.IPAddresses) > 0 {
|
||||
if node.IPv4 != nil || node.IPv6 != nil {
|
||||
if err := tx.Save(&node).Error; err != nil {
|
||||
return nil, fmt.Errorf("failed register existing node in the database: %w", err)
|
||||
}
|
||||
|
@ -393,7 +386,8 @@ func RegisterNode(tx *gorm.DB, node types.Node, addrs types.NodeAddresses) (*typ
|
|||
return &node, nil
|
||||
}
|
||||
|
||||
node.IPAddresses = addrs
|
||||
node.IPv4 = ipv4
|
||||
node.IPv6 = ipv6
|
||||
|
||||
if err := tx.Save(&node).Error; err != nil {
|
||||
return nil, fmt.Errorf("failed register(save) node in the database: %w", err)
|
||||
|
@ -402,7 +396,6 @@ func RegisterNode(tx *gorm.DB, node types.Node, addrs types.NodeAddresses) (*typ
|
|||
log.Trace().
|
||||
Caller().
|
||||
Str("node", node.Hostname).
|
||||
Str("ip", strings.Join(addrs.StringSlice(), ",")).
|
||||
Msg("Node registered with the database")
|
||||
|
||||
return &node, nil
|
||||
|
@ -456,13 +449,7 @@ func GetAdvertisedRoutes(tx *gorm.DB, node *types.Node) ([]netip.Prefix, error)
|
|||
Preload("Node").
|
||||
Where("node_id = ? AND advertised = ?", node.ID, true).Find(&routes).Error
|
||||
if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Str("node", node.Hostname).
|
||||
Msg("Could not get advertised routes for node")
|
||||
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("getting advertised routes for node(%d): %w", node.ID, err)
|
||||
}
|
||||
|
||||
prefixes := []netip.Prefix{}
|
||||
|
@ -488,13 +475,7 @@ func GetEnabledRoutes(tx *gorm.DB, node *types.Node) ([]netip.Prefix, error) {
|
|||
Where("node_id = ? AND advertised = ? AND enabled = ?", node.ID, true, true).
|
||||
Find(&routes).Error
|
||||
if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Str("node", node.Hostname).
|
||||
Msg("Could not get enabled routes for node")
|
||||
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("getting enabled routes for node(%d): %w", node.ID, err)
|
||||
}
|
||||
|
||||
prefixes := []netip.Prefix{}
|
||||
|
@ -513,8 +494,6 @@ func IsRoutesEnabled(tx *gorm.DB, node *types.Node, routeStr string) bool {
|
|||
|
||||
enabledRoutes, err := GetEnabledRoutes(tx, node)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("Could not get enabled routes")
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
|
@ -606,7 +585,7 @@ func enableRoutes(tx *gorm.DB,
|
|||
|
||||
return &types.StateUpdate{
|
||||
Type: types.StatePeerChanged,
|
||||
ChangeNodes: types.Nodes{node},
|
||||
ChangeNodes: []types.NodeID{node.ID},
|
||||
Message: "created in db.enableRoutes",
|
||||
}, nil
|
||||
}
|
||||
|
@ -681,59 +660,49 @@ func GenerateGivenName(
|
|||
return givenName, nil
|
||||
}
|
||||
|
||||
func ExpireEphemeralNodes(tx *gorm.DB,
|
||||
inactivityThreshhold time.Duration,
|
||||
) (types.StateUpdate, bool) {
|
||||
func DeleteExpiredEphemeralNodes(tx *gorm.DB,
|
||||
inactivityThreshold time.Duration,
|
||||
) ([]types.NodeID, []types.NodeID) {
|
||||
users, err := ListUsers(tx)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("Error listing users")
|
||||
|
||||
return types.StateUpdate{}, false
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
expired := make([]tailcfg.NodeID, 0)
|
||||
var expired []types.NodeID
|
||||
var changedNodes []types.NodeID
|
||||
for _, user := range users {
|
||||
nodes, err := ListNodesByUser(tx, user.Name)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Err(err).
|
||||
Str("user", user.Name).
|
||||
Msg("Error listing nodes in user")
|
||||
|
||||
return types.StateUpdate{}, false
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
for idx, node := range nodes {
|
||||
if node.IsEphemeral() && node.LastSeen != nil &&
|
||||
time.Now().
|
||||
After(node.LastSeen.Add(inactivityThreshhold)) {
|
||||
expired = append(expired, tailcfg.NodeID(node.ID))
|
||||
After(node.LastSeen.Add(inactivityThreshold)) {
|
||||
expired = append(expired, node.ID)
|
||||
|
||||
log.Info().
|
||||
Str("node", node.Hostname).
|
||||
Msg("Ephemeral client removed from database")
|
||||
|
||||
// empty isConnected map as ephemeral nodes are not routes
|
||||
err = DeleteNode(tx, nodes[idx], map[key.MachinePublic]bool{})
|
||||
changed, err := DeleteNode(tx, nodes[idx], nil)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Err(err).
|
||||
Str("node", node.Hostname).
|
||||
Msg("🤮 Cannot delete ephemeral node from the database")
|
||||
}
|
||||
|
||||
changedNodes = append(changedNodes, changed...)
|
||||
}
|
||||
}
|
||||
|
||||
// TODO(kradalby): needs to be moved out of transaction
|
||||
}
|
||||
if len(expired) > 0 {
|
||||
return types.StateUpdate{
|
||||
Type: types.StatePeerRemoved,
|
||||
Removed: expired,
|
||||
}, true
|
||||
}
|
||||
|
||||
return types.StateUpdate{}, false
|
||||
return expired, changedNodes
|
||||
}
|
||||
|
||||
func ExpireExpiredNodes(tx *gorm.DB,
|
||||
|
@ -748,41 +717,14 @@ func ExpireExpiredNodes(tx *gorm.DB,
|
|||
|
||||
nodes, err := ListNodes(tx)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Err(err).
|
||||
Msg("Error listing nodes to find expired nodes")
|
||||
|
||||
return time.Unix(0, 0), types.StateUpdate{}, false
|
||||
}
|
||||
for index, node := range nodes {
|
||||
if node.IsExpired() &&
|
||||
// TODO(kradalby): Replace this, it is very spammy
|
||||
// It will notify about all nodes that has been expired.
|
||||
// It should only notify about expired nodes since _last check_.
|
||||
node.Expiry.After(lastCheck) {
|
||||
for _, node := range nodes {
|
||||
if node.IsExpired() && node.Expiry.After(lastCheck) {
|
||||
expired = append(expired, &tailcfg.PeerChange{
|
||||
NodeID: tailcfg.NodeID(node.ID),
|
||||
KeyExpiry: node.Expiry,
|
||||
})
|
||||
|
||||
now := time.Now()
|
||||
// Do not use setNodeExpiry as that has a notifier hook, which
|
||||
// can cause a deadlock, we are updating all changed nodes later
|
||||
// and there is no point in notifiying twice.
|
||||
if err := tx.Model(&nodes[index]).Updates(types.Node{
|
||||
Expiry: &now,
|
||||
}).Error; err != nil {
|
||||
log.Error().
|
||||
Err(err).
|
||||
Str("node", node.Hostname).
|
||||
Str("name", node.GivenName).
|
||||
Msg("🤮 Cannot expire node")
|
||||
} else {
|
||||
log.Info().
|
||||
Str("node", node.Hostname).
|
||||
Str("name", node.GivenName).
|
||||
Msg("Node successfully expired")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -11,6 +11,7 @@ import (
|
|||
"github.com/juanfont/headscale/hscontrol/policy"
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/juanfont/headscale/hscontrol/util"
|
||||
"github.com/puzpuzpuz/xsync/v3"
|
||||
"gopkg.in/check.v1"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/key"
|
||||
|
@ -28,6 +29,7 @@ func (s *Suite) TestGetNode(c *check.C) {
|
|||
|
||||
nodeKey := key.NewNode()
|
||||
machineKey := key.NewMachine()
|
||||
pakID := uint(pak.ID)
|
||||
|
||||
node := &types.Node{
|
||||
ID: 0,
|
||||
|
@ -36,9 +38,10 @@ func (s *Suite) TestGetNode(c *check.C) {
|
|||
Hostname: "testnode",
|
||||
UserID: user.ID,
|
||||
RegisterMethod: util.RegisterMethodAuthKey,
|
||||
AuthKeyID: uint(pak.ID),
|
||||
AuthKeyID: &pakID,
|
||||
}
|
||||
db.DB.Save(node)
|
||||
trx := db.DB.Save(node)
|
||||
c.Assert(trx.Error, check.IsNil)
|
||||
|
||||
_, err = db.getNode("test", "testnode")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
@ -57,6 +60,7 @@ func (s *Suite) TestGetNodeByID(c *check.C) {
|
|||
nodeKey := key.NewNode()
|
||||
machineKey := key.NewMachine()
|
||||
|
||||
pakID := uint(pak.ID)
|
||||
node := types.Node{
|
||||
ID: 0,
|
||||
MachineKey: machineKey.Public(),
|
||||
|
@ -64,9 +68,10 @@ func (s *Suite) TestGetNodeByID(c *check.C) {
|
|||
Hostname: "testnode",
|
||||
UserID: user.ID,
|
||||
RegisterMethod: util.RegisterMethodAuthKey,
|
||||
AuthKeyID: uint(pak.ID),
|
||||
AuthKeyID: &pakID,
|
||||
}
|
||||
db.DB.Save(&node)
|
||||
trx := db.DB.Save(&node)
|
||||
c.Assert(trx.Error, check.IsNil)
|
||||
|
||||
_, err = db.GetNodeByID(0)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
@ -87,6 +92,7 @@ func (s *Suite) TestGetNodeByAnyNodeKey(c *check.C) {
|
|||
|
||||
machineKey := key.NewMachine()
|
||||
|
||||
pakID := uint(pak.ID)
|
||||
node := types.Node{
|
||||
ID: 0,
|
||||
MachineKey: machineKey.Public(),
|
||||
|
@ -94,9 +100,10 @@ func (s *Suite) TestGetNodeByAnyNodeKey(c *check.C) {
|
|||
Hostname: "testnode",
|
||||
UserID: user.ID,
|
||||
RegisterMethod: util.RegisterMethodAuthKey,
|
||||
AuthKeyID: uint(pak.ID),
|
||||
AuthKeyID: &pakID,
|
||||
}
|
||||
db.DB.Save(&node)
|
||||
trx := db.DB.Save(&node)
|
||||
c.Assert(trx.Error, check.IsNil)
|
||||
|
||||
_, err = db.GetNodeByAnyKey(machineKey.Public(), nodeKey.Public(), oldNodeKey.Public())
|
||||
c.Assert(err, check.IsNil)
|
||||
|
@ -116,11 +123,11 @@ func (s *Suite) TestHardDeleteNode(c *check.C) {
|
|||
Hostname: "testnode3",
|
||||
UserID: user.ID,
|
||||
RegisterMethod: util.RegisterMethodAuthKey,
|
||||
AuthKeyID: uint(1),
|
||||
}
|
||||
db.DB.Save(&node)
|
||||
trx := db.DB.Save(&node)
|
||||
c.Assert(trx.Error, check.IsNil)
|
||||
|
||||
err = db.DeleteNode(&node, map[key.MachinePublic]bool{})
|
||||
_, err = db.DeleteNode(&node, xsync.NewMapOf[types.NodeID, bool]())
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
_, err = db.getNode(user.Name, "testnode3")
|
||||
|
@ -137,26 +144,28 @@ func (s *Suite) TestListPeers(c *check.C) {
|
|||
_, err = db.GetNodeByID(0)
|
||||
c.Assert(err, check.NotNil)
|
||||
|
||||
pakID := uint(pak.ID)
|
||||
for index := 0; index <= 10; index++ {
|
||||
nodeKey := key.NewNode()
|
||||
machineKey := key.NewMachine()
|
||||
|
||||
node := types.Node{
|
||||
ID: uint64(index),
|
||||
ID: types.NodeID(index),
|
||||
MachineKey: machineKey.Public(),
|
||||
NodeKey: nodeKey.Public(),
|
||||
Hostname: "testnode" + strconv.Itoa(index),
|
||||
UserID: user.ID,
|
||||
RegisterMethod: util.RegisterMethodAuthKey,
|
||||
AuthKeyID: uint(pak.ID),
|
||||
AuthKeyID: &pakID,
|
||||
}
|
||||
db.DB.Save(&node)
|
||||
trx := db.DB.Save(&node)
|
||||
c.Assert(trx.Error, check.IsNil)
|
||||
}
|
||||
|
||||
node0ByID, err := db.GetNodeByID(0)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
peersOfNode0, err := db.ListPeers(node0ByID)
|
||||
peersOfNode0, err := db.ListPeers(node0ByID.ID)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
c.Assert(len(peersOfNode0), check.Equals, 9)
|
||||
|
@ -187,20 +196,21 @@ func (s *Suite) TestGetACLFilteredPeers(c *check.C) {
|
|||
for index := 0; index <= 10; index++ {
|
||||
nodeKey := key.NewNode()
|
||||
machineKey := key.NewMachine()
|
||||
pakID := uint(stor[index%2].key.ID)
|
||||
|
||||
v4 := netip.MustParseAddr(fmt.Sprintf("100.64.0.%v", strconv.Itoa(index+1)))
|
||||
node := types.Node{
|
||||
ID: uint64(index),
|
||||
MachineKey: machineKey.Public(),
|
||||
NodeKey: nodeKey.Public(),
|
||||
IPAddresses: types.NodeAddresses{
|
||||
netip.MustParseAddr(fmt.Sprintf("100.64.0.%v", strconv.Itoa(index+1))),
|
||||
},
|
||||
ID: types.NodeID(index),
|
||||
MachineKey: machineKey.Public(),
|
||||
NodeKey: nodeKey.Public(),
|
||||
IPv4: &v4,
|
||||
Hostname: "testnode" + strconv.Itoa(index),
|
||||
UserID: stor[index%2].user.ID,
|
||||
RegisterMethod: util.RegisterMethodAuthKey,
|
||||
AuthKeyID: uint(stor[index%2].key.ID),
|
||||
AuthKeyID: &pakID,
|
||||
}
|
||||
db.DB.Save(&node)
|
||||
trx := db.DB.Save(&node)
|
||||
c.Assert(trx.Error, check.IsNil)
|
||||
}
|
||||
|
||||
aclPolicy := &policy.ACLPolicy{
|
||||
|
@ -232,16 +242,16 @@ func (s *Suite) TestGetACLFilteredPeers(c *check.C) {
|
|||
c.Logf("Node(%v), user: %v", testNode.Hostname, testNode.User)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
adminPeers, err := db.ListPeers(adminNode)
|
||||
adminPeers, err := db.ListPeers(adminNode.ID)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
testPeers, err := db.ListPeers(testNode)
|
||||
testPeers, err := db.ListPeers(testNode.ID)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
adminRules, _, err := policy.GenerateFilterAndSSHRules(aclPolicy, adminNode, adminPeers)
|
||||
adminRules, _, err := policy.GenerateFilterAndSSHRulesForTests(aclPolicy, adminNode, adminPeers)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
testRules, _, err := policy.GenerateFilterAndSSHRules(aclPolicy, testNode, testPeers)
|
||||
testRules, _, err := policy.GenerateFilterAndSSHRulesForTests(aclPolicy, testNode, testPeers)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
peersOfAdminNode := policy.FilterNodesByACL(adminNode, adminPeers, adminRules)
|
||||
|
@ -272,6 +282,7 @@ func (s *Suite) TestExpireNode(c *check.C) {
|
|||
|
||||
nodeKey := key.NewNode()
|
||||
machineKey := key.NewMachine()
|
||||
pakID := uint(pak.ID)
|
||||
|
||||
node := &types.Node{
|
||||
ID: 0,
|
||||
|
@ -280,7 +291,7 @@ func (s *Suite) TestExpireNode(c *check.C) {
|
|||
Hostname: "testnode",
|
||||
UserID: user.ID,
|
||||
RegisterMethod: util.RegisterMethodAuthKey,
|
||||
AuthKeyID: uint(pak.ID),
|
||||
AuthKeyID: &pakID,
|
||||
Expiry: &time.Time{},
|
||||
}
|
||||
db.DB.Save(node)
|
||||
|
@ -301,27 +312,6 @@ func (s *Suite) TestExpireNode(c *check.C) {
|
|||
c.Assert(nodeFromDB.IsExpired(), check.Equals, true)
|
||||
}
|
||||
|
||||
func (s *Suite) TestSerdeAddressStrignSlice(c *check.C) {
|
||||
input := types.NodeAddresses([]netip.Addr{
|
||||
netip.MustParseAddr("192.0.2.1"),
|
||||
netip.MustParseAddr("2001:db8::1"),
|
||||
})
|
||||
serialized, err := input.Value()
|
||||
c.Assert(err, check.IsNil)
|
||||
if serial, ok := serialized.(string); ok {
|
||||
c.Assert(serial, check.Equals, "192.0.2.1,2001:db8::1")
|
||||
}
|
||||
|
||||
var deserialized types.NodeAddresses
|
||||
err = deserialized.Scan(serialized)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
c.Assert(len(deserialized), check.Equals, len(input))
|
||||
for i := range deserialized {
|
||||
c.Assert(deserialized[i], check.Equals, input[i])
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Suite) TestGenerateGivenName(c *check.C) {
|
||||
user1, err := db.CreateUser("user-1")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
@ -337,6 +327,7 @@ func (s *Suite) TestGenerateGivenName(c *check.C) {
|
|||
|
||||
machineKey2 := key.NewMachine()
|
||||
|
||||
pakID := uint(pak.ID)
|
||||
node := &types.Node{
|
||||
ID: 0,
|
||||
MachineKey: machineKey.Public(),
|
||||
|
@ -345,9 +336,11 @@ func (s *Suite) TestGenerateGivenName(c *check.C) {
|
|||
GivenName: "hostname-1",
|
||||
UserID: user1.ID,
|
||||
RegisterMethod: util.RegisterMethodAuthKey,
|
||||
AuthKeyID: uint(pak.ID),
|
||||
AuthKeyID: &pakID,
|
||||
}
|
||||
db.DB.Save(node)
|
||||
|
||||
trx := db.DB.Save(node)
|
||||
c.Assert(trx.Error, check.IsNil)
|
||||
|
||||
givenName, err := db.GenerateGivenName(machineKey2.Public(), "hostname-2")
|
||||
comment := check.Commentf("Same user, unique nodes, unique hostnames, no conflict")
|
||||
|
@ -378,6 +371,7 @@ func (s *Suite) TestSetTags(c *check.C) {
|
|||
nodeKey := key.NewNode()
|
||||
machineKey := key.NewMachine()
|
||||
|
||||
pakID := uint(pak.ID)
|
||||
node := &types.Node{
|
||||
ID: 0,
|
||||
MachineKey: machineKey.Public(),
|
||||
|
@ -385,9 +379,11 @@ func (s *Suite) TestSetTags(c *check.C) {
|
|||
Hostname: "testnode",
|
||||
UserID: user.ID,
|
||||
RegisterMethod: util.RegisterMethodAuthKey,
|
||||
AuthKeyID: uint(pak.ID),
|
||||
AuthKeyID: &pakID,
|
||||
}
|
||||
db.DB.Save(node)
|
||||
|
||||
trx := db.DB.Save(node)
|
||||
c.Assert(trx.Error, check.IsNil)
|
||||
|
||||
// assign simple tags
|
||||
sTags := []string{"tag:test", "tag:foo"}
|
||||
|
@ -397,7 +393,7 @@ func (s *Suite) TestSetTags(c *check.C) {
|
|||
c.Assert(err, check.IsNil)
|
||||
c.Assert(node.ForcedTags, check.DeepEquals, types.StringList(sTags))
|
||||
|
||||
// assign duplicat tags, expect no errors but no doubles in DB
|
||||
// assign duplicate tags, expect no errors but no doubles in DB
|
||||
eTags := []string{"tag:bar", "tag:test", "tag:unknown", "tag:test"}
|
||||
err = db.SetTags(node.ID, eTags)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
@ -408,6 +404,13 @@ func (s *Suite) TestSetTags(c *check.C) {
|
|||
check.DeepEquals,
|
||||
types.StringList([]string{"tag:bar", "tag:test", "tag:unknown"}),
|
||||
)
|
||||
|
||||
// test removing tags
|
||||
err = db.SetTags(node.ID, []string{})
|
||||
c.Assert(err, check.IsNil)
|
||||
node, err = db.getNode("test", "testnode")
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(node.ForcedTags, check.DeepEquals, types.StringList([]string{}))
|
||||
}
|
||||
|
||||
func TestHeadscale_generateGivenName(t *testing.T) {
|
||||
|
@ -561,6 +564,8 @@ func (s *Suite) TestAutoApproveRoutes(c *check.C) {
|
|||
// Check if a subprefix of an autoapproved route is approved
|
||||
route2 := netip.MustParsePrefix("10.11.0.0/24")
|
||||
|
||||
v4 := netip.MustParseAddr("100.64.0.1")
|
||||
pakID := uint(pak.ID)
|
||||
node := types.Node{
|
||||
ID: 0,
|
||||
MachineKey: machineKey.Public(),
|
||||
|
@ -568,15 +573,16 @@ func (s *Suite) TestAutoApproveRoutes(c *check.C) {
|
|||
Hostname: "test",
|
||||
UserID: user.ID,
|
||||
RegisterMethod: util.RegisterMethodAuthKey,
|
||||
AuthKeyID: uint(pak.ID),
|
||||
AuthKeyID: &pakID,
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
RequestTags: []string{"tag:exit"},
|
||||
RoutableIPs: []netip.Prefix{defaultRouteV4, defaultRouteV6, route1, route2},
|
||||
},
|
||||
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.1")},
|
||||
IPv4: &v4,
|
||||
}
|
||||
|
||||
db.DB.Save(&node)
|
||||
trx := db.DB.Save(&node)
|
||||
c.Assert(trx.Error, check.IsNil)
|
||||
|
||||
sendUpdate, err := db.SaveNodeRoutes(&node)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
@ -586,7 +592,7 @@ func (s *Suite) TestAutoApproveRoutes(c *check.C) {
|
|||
c.Assert(err, check.IsNil)
|
||||
|
||||
// TODO(kradalby): Check state update
|
||||
_, err = db.EnableAutoApprovedRoutes(pol, node0ByID)
|
||||
err = db.EnableAutoApprovedRoutes(pol, node0ByID)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
enabledRoutes, err := db.GetEnabledRoutes(node0ByID)
|
||||
|
|
|
@ -83,7 +83,7 @@ func CreatePreAuthKey(
|
|||
if !seenTags[tag] {
|
||||
if err := tx.Save(&types.PreAuthKeyACLTag{PreAuthKeyID: key.ID, Tag: tag}).Error; err != nil {
|
||||
return nil, fmt.Errorf(
|
||||
"failed to ceate key tag in the database: %w",
|
||||
"failed to create key tag in the database: %w",
|
||||
err,
|
||||
)
|
||||
}
|
||||
|
@ -92,10 +92,6 @@ func CreatePreAuthKey(
|
|||
}
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &key, nil
|
||||
}
|
||||
|
||||
|
@ -201,9 +197,10 @@ func ValidatePreAuthKey(tx *gorm.DB, k string) (*types.PreAuthKey, error) {
|
|||
}
|
||||
|
||||
nodes := types.Nodes{}
|
||||
pakID := uint(pak.ID)
|
||||
if err := tx.
|
||||
Preload("AuthKey").
|
||||
Where(&types.Node{AuthKeyID: uint(pak.ID)}).
|
||||
Where(&types.Node{AuthKeyID: &pakID}).
|
||||
Find(&nodes).Error; err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
@ -76,14 +76,16 @@ func (*Suite) TestAlreadyUsedKey(c *check.C) {
|
|||
pak, err := db.CreatePreAuthKey(user.Name, false, false, nil, nil)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
pakID := uint(pak.ID)
|
||||
node := types.Node{
|
||||
ID: 0,
|
||||
Hostname: "testest",
|
||||
UserID: user.ID,
|
||||
RegisterMethod: util.RegisterMethodAuthKey,
|
||||
AuthKeyID: uint(pak.ID),
|
||||
AuthKeyID: &pakID,
|
||||
}
|
||||
db.DB.Save(&node)
|
||||
trx := db.DB.Save(&node)
|
||||
c.Assert(trx.Error, check.IsNil)
|
||||
|
||||
key, err := db.ValidatePreAuthKey(pak.Key)
|
||||
c.Assert(err, check.Equals, ErrSingleUseAuthKeyHasBeenUsed)
|
||||
|
@ -97,14 +99,16 @@ func (*Suite) TestReusableBeingUsedKey(c *check.C) {
|
|||
pak, err := db.CreatePreAuthKey(user.Name, true, false, nil, nil)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
pakID := uint(pak.ID)
|
||||
node := types.Node{
|
||||
ID: 1,
|
||||
Hostname: "testest",
|
||||
UserID: user.ID,
|
||||
RegisterMethod: util.RegisterMethodAuthKey,
|
||||
AuthKeyID: uint(pak.ID),
|
||||
AuthKeyID: &pakID,
|
||||
}
|
||||
db.DB.Save(&node)
|
||||
trx := db.DB.Save(&node)
|
||||
c.Assert(trx.Error, check.IsNil)
|
||||
|
||||
key, err := db.ValidatePreAuthKey(pak.Key)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
@ -131,15 +135,17 @@ func (*Suite) TestEphemeralKeyReusable(c *check.C) {
|
|||
c.Assert(err, check.IsNil)
|
||||
|
||||
now := time.Now().Add(-time.Second * 30)
|
||||
pakID := uint(pak.ID)
|
||||
node := types.Node{
|
||||
ID: 0,
|
||||
Hostname: "testest",
|
||||
UserID: user.ID,
|
||||
RegisterMethod: util.RegisterMethodAuthKey,
|
||||
LastSeen: &now,
|
||||
AuthKeyID: uint(pak.ID),
|
||||
AuthKeyID: &pakID,
|
||||
}
|
||||
db.DB.Save(&node)
|
||||
trx := db.DB.Save(&node)
|
||||
c.Assert(trx.Error, check.IsNil)
|
||||
|
||||
_, err = db.ValidatePreAuthKey(pak.Key)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
@ -147,8 +153,8 @@ func (*Suite) TestEphemeralKeyReusable(c *check.C) {
|
|||
_, err = db.getNode("test7", "testest")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
db.DB.Transaction(func(tx *gorm.DB) error {
|
||||
ExpireEphemeralNodes(tx, time.Second*20)
|
||||
db.Write(func(tx *gorm.DB) error {
|
||||
DeleteExpiredEphemeralNodes(tx, time.Second*20)
|
||||
return nil
|
||||
})
|
||||
|
||||
|
@ -165,13 +171,14 @@ func (*Suite) TestEphemeralKeyNotReusable(c *check.C) {
|
|||
c.Assert(err, check.IsNil)
|
||||
|
||||
now := time.Now().Add(-time.Second * 30)
|
||||
pakId := uint(pak.ID)
|
||||
node := types.Node{
|
||||
ID: 0,
|
||||
Hostname: "testest",
|
||||
UserID: user.ID,
|
||||
RegisterMethod: util.RegisterMethodAuthKey,
|
||||
LastSeen: &now,
|
||||
AuthKeyID: uint(pak.ID),
|
||||
AuthKeyID: &pakId,
|
||||
}
|
||||
db.DB.Save(&node)
|
||||
|
||||
|
@ -181,8 +188,8 @@ func (*Suite) TestEphemeralKeyNotReusable(c *check.C) {
|
|||
_, err = db.getNode("test7", "testest")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
db.DB.Transaction(func(tx *gorm.DB) error {
|
||||
ExpireEphemeralNodes(tx, time.Second*20)
|
||||
db.Write(func(tx *gorm.DB) error {
|
||||
DeleteExpiredEphemeralNodes(tx, time.Second*20)
|
||||
return nil
|
||||
})
|
||||
|
||||
|
|
|
@ -2,13 +2,16 @@ package db
|
|||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/netip"
|
||||
"sort"
|
||||
|
||||
"github.com/juanfont/headscale/hscontrol/policy"
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/puzpuzpuz/xsync/v3"
|
||||
"github.com/rs/zerolog/log"
|
||||
"gorm.io/gorm"
|
||||
"tailscale.com/types/key"
|
||||
"tailscale.com/util/set"
|
||||
)
|
||||
|
||||
var ErrRouteIsNotAvailable = errors.New("route is not available")
|
||||
|
@ -124,8 +127,8 @@ func EnableRoute(tx *gorm.DB, id uint64) (*types.StateUpdate, error) {
|
|||
|
||||
func DisableRoute(tx *gorm.DB,
|
||||
id uint64,
|
||||
isConnected map[key.MachinePublic]bool,
|
||||
) (*types.StateUpdate, error) {
|
||||
isLikelyConnected *xsync.MapOf[types.NodeID, bool],
|
||||
) ([]types.NodeID, error) {
|
||||
route, err := GetRoute(tx, id)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -137,16 +140,15 @@ func DisableRoute(tx *gorm.DB,
|
|||
// Tailscale requires both IPv4 and IPv6 exit routes to
|
||||
// be enabled at the same time, as per
|
||||
// https://github.com/juanfont/headscale/issues/804#issuecomment-1399314002
|
||||
var update *types.StateUpdate
|
||||
var update []types.NodeID
|
||||
if !route.IsExitRoute() {
|
||||
update, err = failoverRouteReturnUpdate(tx, isConnected, route)
|
||||
route.Enabled = false
|
||||
err = tx.Save(route).Error
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
route.Enabled = false
|
||||
route.IsPrimary = false
|
||||
err = tx.Save(route).Error
|
||||
update, err = failoverRouteTx(tx, isLikelyConnected, route)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -160,6 +162,7 @@ func DisableRoute(tx *gorm.DB,
|
|||
if routes[i].IsExitRoute() {
|
||||
routes[i].Enabled = false
|
||||
routes[i].IsPrimary = false
|
||||
|
||||
err = tx.Save(&routes[i]).Error
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -168,26 +171,11 @@ func DisableRoute(tx *gorm.DB,
|
|||
}
|
||||
}
|
||||
|
||||
if routes == nil {
|
||||
routes, err = GetNodeRoutes(tx, &node)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
node.Routes = routes
|
||||
|
||||
// If update is empty, it means that one was not created
|
||||
// by failover (as a failover was not necessary), create
|
||||
// one and return to the caller.
|
||||
if update == nil {
|
||||
update = &types.StateUpdate{
|
||||
Type: types.StatePeerChanged,
|
||||
ChangeNodes: types.Nodes{
|
||||
&node,
|
||||
},
|
||||
Message: "called from db.DisableRoute",
|
||||
}
|
||||
update = []types.NodeID{node.ID}
|
||||
}
|
||||
|
||||
return update, nil
|
||||
|
@ -195,18 +183,18 @@ func DisableRoute(tx *gorm.DB,
|
|||
|
||||
func (hsdb *HSDatabase) DeleteRoute(
|
||||
id uint64,
|
||||
isConnected map[key.MachinePublic]bool,
|
||||
) (*types.StateUpdate, error) {
|
||||
return Write(hsdb.DB, func(tx *gorm.DB) (*types.StateUpdate, error) {
|
||||
return DeleteRoute(tx, id, isConnected)
|
||||
isLikelyConnected *xsync.MapOf[types.NodeID, bool],
|
||||
) ([]types.NodeID, error) {
|
||||
return Write(hsdb.DB, func(tx *gorm.DB) ([]types.NodeID, error) {
|
||||
return DeleteRoute(tx, id, isLikelyConnected)
|
||||
})
|
||||
}
|
||||
|
||||
func DeleteRoute(
|
||||
tx *gorm.DB,
|
||||
id uint64,
|
||||
isConnected map[key.MachinePublic]bool,
|
||||
) (*types.StateUpdate, error) {
|
||||
isLikelyConnected *xsync.MapOf[types.NodeID, bool],
|
||||
) ([]types.NodeID, error) {
|
||||
route, err := GetRoute(tx, id)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -218,9 +206,9 @@ func DeleteRoute(
|
|||
// Tailscale requires both IPv4 and IPv6 exit routes to
|
||||
// be enabled at the same time, as per
|
||||
// https://github.com/juanfont/headscale/issues/804#issuecomment-1399314002
|
||||
var update *types.StateUpdate
|
||||
var update []types.NodeID
|
||||
if !route.IsExitRoute() {
|
||||
update, err = failoverRouteReturnUpdate(tx, isConnected, route)
|
||||
update, err = failoverRouteTx(tx, isLikelyConnected, route)
|
||||
if err != nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
@ -229,7 +217,7 @@ func DeleteRoute(
|
|||
return nil, err
|
||||
}
|
||||
} else {
|
||||
routes, err := GetNodeRoutes(tx, &node)
|
||||
routes, err = GetNodeRoutes(tx, &node)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -259,35 +247,37 @@ func DeleteRoute(
|
|||
node.Routes = routes
|
||||
|
||||
if update == nil {
|
||||
update = &types.StateUpdate{
|
||||
Type: types.StatePeerChanged,
|
||||
ChangeNodes: types.Nodes{
|
||||
&node,
|
||||
},
|
||||
Message: "called from db.DeleteRoute",
|
||||
}
|
||||
update = []types.NodeID{node.ID}
|
||||
}
|
||||
|
||||
return update, nil
|
||||
}
|
||||
|
||||
func deleteNodeRoutes(tx *gorm.DB, node *types.Node, isConnected map[key.MachinePublic]bool) error {
|
||||
func deleteNodeRoutes(tx *gorm.DB, node *types.Node, isLikelyConnected *xsync.MapOf[types.NodeID, bool]) ([]types.NodeID, error) {
|
||||
routes, err := GetNodeRoutes(tx, node)
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, fmt.Errorf("getting node routes: %w", err)
|
||||
}
|
||||
|
||||
var changed []types.NodeID
|
||||
for i := range routes {
|
||||
if err := tx.Unscoped().Delete(&routes[i]).Error; err != nil {
|
||||
return err
|
||||
return nil, fmt.Errorf("deleting route(%d): %w", &routes[i].ID, err)
|
||||
}
|
||||
|
||||
// TODO(kradalby): This is a bit too aggressive, we could probably
|
||||
// figure out which routes needs to be failed over rather than all.
|
||||
failoverRouteReturnUpdate(tx, isConnected, &routes[i])
|
||||
chn, err := failoverRouteTx(tx, isLikelyConnected, &routes[i])
|
||||
if err != nil {
|
||||
return changed, fmt.Errorf("failing over route after delete: %w", err)
|
||||
}
|
||||
|
||||
if chn != nil {
|
||||
changed = append(changed, chn...)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
return changed, nil
|
||||
}
|
||||
|
||||
// isUniquePrefix returns if there is another node providing the same route already.
|
||||
|
@ -400,7 +390,7 @@ func SaveNodeRoutes(tx *gorm.DB, node *types.Node) (bool, error) {
|
|||
for prefix, exists := range advertisedRoutes {
|
||||
if !exists {
|
||||
route := types.Route{
|
||||
NodeID: node.ID,
|
||||
NodeID: node.ID.Uint64(),
|
||||
Prefix: types.IPPrefix(prefix),
|
||||
Advertised: true,
|
||||
Enabled: false,
|
||||
|
@ -415,11 +405,12 @@ func SaveNodeRoutes(tx *gorm.DB, node *types.Node) (bool, error) {
|
|||
return sendUpdate, nil
|
||||
}
|
||||
|
||||
// EnsureFailoverRouteIsAvailable takes a node and checks if the node's route
|
||||
// currently have a functioning host that exposes the network.
|
||||
func EnsureFailoverRouteIsAvailable(
|
||||
// FailoverNodeRoutesIfNeccessary takes a node and checks if the node's route
|
||||
// need to be failed over to another host.
|
||||
// If needed, the failover will be attempted.
|
||||
func FailoverNodeRoutesIfNeccessary(
|
||||
tx *gorm.DB,
|
||||
isConnected map[key.MachinePublic]bool,
|
||||
isLikelyConnected *xsync.MapOf[types.NodeID, bool],
|
||||
node *types.Node,
|
||||
) (*types.StateUpdate, error) {
|
||||
nodeRoutes, err := GetNodeRoutes(tx, node)
|
||||
|
@ -427,82 +418,57 @@ func EnsureFailoverRouteIsAvailable(
|
|||
return nil, nil
|
||||
}
|
||||
|
||||
var changedNodes types.Nodes
|
||||
changedNodes := make(set.Set[types.NodeID])
|
||||
|
||||
nodeRouteLoop:
|
||||
for _, nodeRoute := range nodeRoutes {
|
||||
routes, err := getRoutesByPrefix(tx, netip.Prefix(nodeRoute.Prefix))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("getting routes by prefix: %w", err)
|
||||
}
|
||||
|
||||
for _, route := range routes {
|
||||
if route.IsPrimary {
|
||||
// if we have a primary route, and the node is connected
|
||||
// nothing needs to be done.
|
||||
if isConnected[route.Node.MachineKey] {
|
||||
continue
|
||||
if val, ok := isLikelyConnected.Load(route.Node.ID); ok && val {
|
||||
continue nodeRouteLoop
|
||||
}
|
||||
|
||||
// if not, we need to failover the route
|
||||
update, err := failoverRouteReturnUpdate(tx, isConnected, &route)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
failover := failoverRoute(isLikelyConnected, &route, routes)
|
||||
if failover != nil {
|
||||
err := failover.save(tx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("saving failover routes: %w", err)
|
||||
}
|
||||
|
||||
if update != nil {
|
||||
changedNodes = append(changedNodes, update.ChangeNodes...)
|
||||
changedNodes.Add(failover.old.Node.ID)
|
||||
changedNodes.Add(failover.new.Node.ID)
|
||||
|
||||
continue nodeRouteLoop
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
chng := changedNodes.Slice()
|
||||
sort.SliceStable(chng, func(i, j int) bool {
|
||||
return chng[i] < chng[j]
|
||||
})
|
||||
|
||||
if len(changedNodes) != 0 {
|
||||
return &types.StateUpdate{
|
||||
Type: types.StatePeerChanged,
|
||||
ChangeNodes: changedNodes,
|
||||
Message: "called from db.EnsureFailoverRouteIsAvailable",
|
||||
ChangeNodes: chng,
|
||||
Message: "called from db.FailoverNodeRoutesIfNeccessary",
|
||||
}, nil
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func failoverRouteReturnUpdate(
|
||||
tx *gorm.DB,
|
||||
isConnected map[key.MachinePublic]bool,
|
||||
r *types.Route,
|
||||
) (*types.StateUpdate, error) {
|
||||
changedKeys, err := failoverRoute(tx, isConnected, r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Trace().
|
||||
Interface("isConnected", isConnected).
|
||||
Interface("changedKeys", changedKeys).
|
||||
Msg("building route failover")
|
||||
|
||||
if len(changedKeys) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var nodes types.Nodes
|
||||
for _, key := range changedKeys {
|
||||
node, err := GetNodeByMachineKey(tx, key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
nodes = append(nodes, node)
|
||||
}
|
||||
|
||||
return &types.StateUpdate{
|
||||
Type: types.StatePeerChanged,
|
||||
ChangeNodes: nodes,
|
||||
Message: "called from db.failoverRouteReturnUpdate",
|
||||
}, nil
|
||||
}
|
||||
|
||||
// failoverRoute takes a route that is no longer available,
|
||||
// failoverRouteTx takes a route that is no longer available,
|
||||
// this can be either from:
|
||||
// - being disabled
|
||||
// - being deleted
|
||||
|
@ -510,11 +476,11 @@ func failoverRouteReturnUpdate(
|
|||
//
|
||||
// and tries to find a new route to take over its place.
|
||||
// If the given route was not primary, it returns early.
|
||||
func failoverRoute(
|
||||
func failoverRouteTx(
|
||||
tx *gorm.DB,
|
||||
isConnected map[key.MachinePublic]bool,
|
||||
isLikelyConnected *xsync.MapOf[types.NodeID, bool],
|
||||
r *types.Route,
|
||||
) ([]key.MachinePublic, error) {
|
||||
) ([]types.NodeID, error) {
|
||||
if r == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
@ -532,14 +498,72 @@ func failoverRoute(
|
|||
|
||||
routes, err := getRoutesByPrefix(tx, netip.Prefix(r.Prefix))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("getting routes by prefix: %w", err)
|
||||
}
|
||||
|
||||
fo := failoverRoute(isLikelyConnected, r, routes)
|
||||
if fo == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
err = fo.save(tx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("saving failover route: %w", err)
|
||||
}
|
||||
|
||||
log.Trace().
|
||||
Str("hostname", fo.new.Node.Hostname).
|
||||
Msgf("set primary to new route, was: id(%d), host(%s), now: id(%d), host(%s)", fo.old.ID, fo.old.Node.Hostname, fo.new.ID, fo.new.Node.Hostname)
|
||||
|
||||
// Return a list of the machinekeys of the changed nodes.
|
||||
return []types.NodeID{fo.old.Node.ID, fo.new.Node.ID}, nil
|
||||
}
|
||||
|
||||
type failover struct {
|
||||
old *types.Route
|
||||
new *types.Route
|
||||
}
|
||||
|
||||
func (f *failover) save(tx *gorm.DB) error {
|
||||
err := tx.Save(f.old).Error
|
||||
if err != nil {
|
||||
return fmt.Errorf("saving old primary: %w", err)
|
||||
}
|
||||
|
||||
err = tx.Save(f.new).Error
|
||||
if err != nil {
|
||||
return fmt.Errorf("saving new primary: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func failoverRoute(
|
||||
isLikelyConnected *xsync.MapOf[types.NodeID, bool],
|
||||
routeToReplace *types.Route,
|
||||
altRoutes types.Routes,
|
||||
|
||||
) *failover {
|
||||
if routeToReplace == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// This route is not a primary route, and it is not
|
||||
// being served to nodes.
|
||||
if !routeToReplace.IsPrimary {
|
||||
return nil
|
||||
}
|
||||
|
||||
// We do not have to failover exit nodes
|
||||
if routeToReplace.IsExitRoute() {
|
||||
return nil
|
||||
}
|
||||
|
||||
var newPrimary *types.Route
|
||||
|
||||
// Find a new suitable route
|
||||
for idx, route := range routes {
|
||||
if r.ID == route.ID {
|
||||
for idx, route := range altRoutes {
|
||||
if routeToReplace.ID == route.ID {
|
||||
continue
|
||||
}
|
||||
|
||||
|
@ -547,9 +571,11 @@ func failoverRoute(
|
|||
continue
|
||||
}
|
||||
|
||||
if isConnected[route.Node.MachineKey] {
|
||||
newPrimary = &routes[idx]
|
||||
break
|
||||
if isLikelyConnected != nil {
|
||||
if val, ok := isLikelyConnected.Load(route.Node.ID); ok && val {
|
||||
newPrimary = &altRoutes[idx]
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -559,48 +585,23 @@ func failoverRoute(
|
|||
// the one currently marked as primary is the
|
||||
// best we got.
|
||||
if newPrimary == nil {
|
||||
return nil, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Trace().
|
||||
Str("hostname", newPrimary.Node.Hostname).
|
||||
Msg("found new primary, updating db")
|
||||
|
||||
// Remove primary from the old route
|
||||
r.IsPrimary = false
|
||||
err = tx.Save(&r).Error
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("error disabling new primary route")
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Trace().
|
||||
Str("hostname", newPrimary.Node.Hostname).
|
||||
Msg("removed primary from old route")
|
||||
|
||||
// Set primary for the new primary
|
||||
routeToReplace.IsPrimary = false
|
||||
newPrimary.IsPrimary = true
|
||||
err = tx.Save(&newPrimary).Error
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("error enabling new primary route")
|
||||
|
||||
return nil, err
|
||||
return &failover{
|
||||
old: routeToReplace,
|
||||
new: newPrimary,
|
||||
}
|
||||
|
||||
log.Trace().
|
||||
Str("hostname", newPrimary.Node.Hostname).
|
||||
Msg("set primary to new route")
|
||||
|
||||
// Return a list of the machinekeys of the changed nodes.
|
||||
return []key.MachinePublic{r.Node.MachineKey, newPrimary.Node.MachineKey}, nil
|
||||
}
|
||||
|
||||
func (hsdb *HSDatabase) EnableAutoApprovedRoutes(
|
||||
aclPolicy *policy.ACLPolicy,
|
||||
node *types.Node,
|
||||
) (*types.StateUpdate, error) {
|
||||
return Write(hsdb.DB, func(tx *gorm.DB) (*types.StateUpdate, error) {
|
||||
) error {
|
||||
return hsdb.Write(func(tx *gorm.DB) error {
|
||||
return EnableAutoApprovedRoutes(tx, aclPolicy, node)
|
||||
})
|
||||
}
|
||||
|
@ -610,20 +611,14 @@ func EnableAutoApprovedRoutes(
|
|||
tx *gorm.DB,
|
||||
aclPolicy *policy.ACLPolicy,
|
||||
node *types.Node,
|
||||
) (*types.StateUpdate, error) {
|
||||
if len(node.IPAddresses) == 0 {
|
||||
return nil, nil // This node has no IPAddresses, so can't possibly match any autoApprovers ACLs
|
||||
) error {
|
||||
if node.IPv4 == nil && node.IPv6 == nil {
|
||||
return nil // This node has no IPAddresses, so can't possibly match any autoApprovers ACLs
|
||||
}
|
||||
|
||||
routes, err := GetNodeAdvertisedRoutes(tx, node)
|
||||
if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Str("node", node.Hostname).
|
||||
Msg("Could not get advertised routes for node")
|
||||
|
||||
return nil, err
|
||||
return fmt.Errorf("getting advertised routes for node(%s %d): %w", node.Hostname, node.ID, err)
|
||||
}
|
||||
|
||||
log.Trace().Interface("routes", routes).Msg("routes for autoapproving")
|
||||
|
@ -639,12 +634,7 @@ func EnableAutoApprovedRoutes(
|
|||
netip.Prefix(advertisedRoute.Prefix),
|
||||
)
|
||||
if err != nil {
|
||||
log.Err(err).
|
||||
Str("advertisedRoute", advertisedRoute.String()).
|
||||
Uint64("nodeId", node.ID).
|
||||
Msg("Failed to resolve autoApprovers for advertised route")
|
||||
|
||||
return nil, err
|
||||
return fmt.Errorf("failed to resolve autoApprovers for route(%d) for node(%s %d): %w", advertisedRoute.ID, node.Hostname, node.ID, err)
|
||||
}
|
||||
|
||||
log.Trace().
|
||||
|
@ -661,40 +651,23 @@ func EnableAutoApprovedRoutes(
|
|||
// TODO(kradalby): figure out how to get this to depend on less stuff
|
||||
approvedIps, err := aclPolicy.ExpandAlias(types.Nodes{node}, approvedAlias)
|
||||
if err != nil {
|
||||
log.Err(err).
|
||||
Str("alias", approvedAlias).
|
||||
Msg("Failed to expand alias when processing autoApprovers policy")
|
||||
|
||||
return nil, err
|
||||
return fmt.Errorf("expanding alias %q for autoApprovers: %w", approvedAlias, err)
|
||||
}
|
||||
|
||||
// approvedIPs should contain all of node's IPs if it matches the rule, so check for first
|
||||
if approvedIps.Contains(node.IPAddresses[0]) {
|
||||
if approvedIps.Contains(*node.IPv4) {
|
||||
approvedRoutes = append(approvedRoutes, advertisedRoute)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
update := &types.StateUpdate{
|
||||
Type: types.StatePeerChanged,
|
||||
ChangeNodes: types.Nodes{},
|
||||
Message: "created in db.EnableAutoApprovedRoutes",
|
||||
}
|
||||
|
||||
for _, approvedRoute := range approvedRoutes {
|
||||
perHostUpdate, err := EnableRoute(tx, uint64(approvedRoute.ID))
|
||||
_, err := EnableRoute(tx, uint64(approvedRoute.ID))
|
||||
if err != nil {
|
||||
log.Err(err).
|
||||
Str("approvedRoute", approvedRoute.String()).
|
||||
Uint64("nodeId", node.ID).
|
||||
Msg("Failed to enable approved route")
|
||||
|
||||
return nil, err
|
||||
return fmt.Errorf("enabling approved route(%d): %w", approvedRoute.ID, err)
|
||||
}
|
||||
|
||||
update.ChangeNodes = append(update.ChangeNodes, perHostUpdate.ChangeNodes...)
|
||||
}
|
||||
|
||||
return update, nil
|
||||
return nil
|
||||
}
|
||||
|
|
File diff suppressed because it is too large
Load diff
|
@ -2,10 +2,10 @@ package db
|
|||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/juanfont/headscale/hscontrol/util"
|
||||
"github.com/rs/zerolog/log"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
|
@ -34,12 +34,7 @@ func CreateUser(tx *gorm.DB, name string) (*types.User, error) {
|
|||
}
|
||||
user.Name = name
|
||||
if err := tx.Create(&user).Error; err != nil {
|
||||
log.Error().
|
||||
Str("func", "CreateUser").
|
||||
Err(err).
|
||||
Msg("Could not create row")
|
||||
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("creating user: %w", err)
|
||||
}
|
||||
|
||||
return &user, nil
|
||||
|
|
|
@ -46,14 +46,16 @@ func (s *Suite) TestDestroyUserErrors(c *check.C) {
|
|||
pak, err = db.CreatePreAuthKey(user.Name, false, false, nil, nil)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
pakID := uint(pak.ID)
|
||||
node := types.Node{
|
||||
ID: 0,
|
||||
Hostname: "testnode",
|
||||
UserID: user.ID,
|
||||
RegisterMethod: util.RegisterMethodAuthKey,
|
||||
AuthKeyID: uint(pak.ID),
|
||||
AuthKeyID: &pakID,
|
||||
}
|
||||
db.DB.Save(&node)
|
||||
trx := db.DB.Save(&node)
|
||||
c.Assert(trx.Error, check.IsNil)
|
||||
|
||||
err = db.DestroyUser("test")
|
||||
c.Assert(err, check.Equals, ErrUserStillHasNodes)
|
||||
|
@ -98,14 +100,16 @@ func (s *Suite) TestSetMachineUser(c *check.C) {
|
|||
pak, err := db.CreatePreAuthKey(oldUser.Name, false, false, nil, nil)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
pakID := uint(pak.ID)
|
||||
node := types.Node{
|
||||
ID: 0,
|
||||
Hostname: "testnode",
|
||||
UserID: oldUser.ID,
|
||||
RegisterMethod: util.RegisterMethodAuthKey,
|
||||
AuthKeyID: uint(pak.ID),
|
||||
AuthKeyID: &pakID,
|
||||
}
|
||||
db.DB.Save(&node)
|
||||
trx := db.DB.Save(&node)
|
||||
c.Assert(trx.Error, check.IsNil)
|
||||
c.Assert(node.UserID, check.Equals, oldUser.ID)
|
||||
|
||||
err = db.AssignNodeToUser(&node, newUser.Name)
|
||||
|
|
|
@ -31,7 +31,7 @@ func loadDERPMapFromPath(path string) (*tailcfg.DERPMap, error) {
|
|||
}
|
||||
|
||||
func loadDERPMapFromURL(addr url.URL) (*tailcfg.DERPMap, error) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), types.HTTPReadTimeout)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), types.HTTPTimeout)
|
||||
defer cancel()
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, addr.String(), nil)
|
||||
|
@ -40,7 +40,7 @@ func loadDERPMapFromURL(addr url.URL) (*tailcfg.DERPMap, error) {
|
|||
}
|
||||
|
||||
client := http.Client{
|
||||
Timeout: types.HTTPReadTimeout,
|
||||
Timeout: types.HTTPTimeout,
|
||||
}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
|
|
|
@ -204,7 +204,7 @@ func DERPProbeHandler(
|
|||
}
|
||||
}
|
||||
|
||||
// DERPBootstrapDNSHandler implements the /bootsrap-dns endpoint
|
||||
// DERPBootstrapDNSHandler implements the /bootstrap-dns endpoint
|
||||
// Described in https://github.com/tailscale/tailscale/issues/1405,
|
||||
// this endpoint provides a way to help a client when it fails to start up
|
||||
// because its DNS are broken.
|
||||
|
|
|
@ -3,7 +3,7 @@ package hscontrol
|
|||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"errors"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
@ -144,7 +144,7 @@ func (api headscaleV1APIServer) ExpirePreAuthKey(
|
|||
ctx context.Context,
|
||||
request *v1.ExpirePreAuthKeyRequest,
|
||||
) (*v1.ExpirePreAuthKeyResponse, error) {
|
||||
err := api.h.db.DB.Transaction(func(tx *gorm.DB) error {
|
||||
err := api.h.db.Write(func(tx *gorm.DB) error {
|
||||
preAuthKey, err := db.GetPreAuthKey(tx, request.GetUser(), request.Key)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -195,7 +195,7 @@ func (api headscaleV1APIServer) RegisterNode(
|
|||
return nil, err
|
||||
}
|
||||
|
||||
addrs, err := api.h.ipAlloc.Next()
|
||||
ipv4, ipv6, err := api.h.ipAlloc.Next()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -208,7 +208,7 @@ func (api headscaleV1APIServer) RegisterNode(
|
|||
request.GetUser(),
|
||||
nil,
|
||||
util.RegisterMethodCLI,
|
||||
addrs,
|
||||
ipv4, ipv6,
|
||||
)
|
||||
})
|
||||
if err != nil {
|
||||
|
@ -222,7 +222,7 @@ func (api headscaleV1APIServer) GetNode(
|
|||
ctx context.Context,
|
||||
request *v1.GetNodeRequest,
|
||||
) (*v1.GetNodeResponse, error) {
|
||||
node, err := api.h.db.GetNodeByID(request.GetNodeId())
|
||||
node, err := api.h.db.GetNodeByID(types.NodeID(request.GetNodeId()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -231,7 +231,7 @@ func (api headscaleV1APIServer) GetNode(
|
|||
|
||||
// Populate the online field based on
|
||||
// currently connected nodes.
|
||||
resp.Online = api.h.nodeNotifier.IsConnected(node.MachineKey)
|
||||
resp.Online = api.h.nodeNotifier.IsConnected(node.ID)
|
||||
|
||||
return &v1.GetNodeResponse{Node: resp}, nil
|
||||
}
|
||||
|
@ -248,12 +248,12 @@ func (api headscaleV1APIServer) SetTags(
|
|||
}
|
||||
|
||||
node, err := db.Write(api.h.db.DB, func(tx *gorm.DB) (*types.Node, error) {
|
||||
err := db.SetTags(tx, request.GetNodeId(), request.GetTags())
|
||||
err := db.SetTags(tx, types.NodeID(request.GetNodeId()), request.GetTags())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return db.GetNodeByID(tx, request.GetNodeId())
|
||||
return db.GetNodeByID(tx, types.NodeID(request.GetNodeId()))
|
||||
})
|
||||
if err != nil {
|
||||
return &v1.SetTagsResponse{
|
||||
|
@ -261,15 +261,12 @@ func (api headscaleV1APIServer) SetTags(
|
|||
}, status.Error(codes.InvalidArgument, err.Error())
|
||||
}
|
||||
|
||||
stateUpdate := types.StateUpdate{
|
||||
ctx = types.NotifyCtx(ctx, "cli-settags", node.Hostname)
|
||||
api.h.nodeNotifier.NotifyWithIgnore(ctx, types.StateUpdate{
|
||||
Type: types.StatePeerChanged,
|
||||
ChangeNodes: types.Nodes{node},
|
||||
ChangeNodes: []types.NodeID{node.ID},
|
||||
Message: "called from api.SetTags",
|
||||
}
|
||||
if stateUpdate.Valid() {
|
||||
ctx := types.NotifyCtx(ctx, "cli-settags", node.Hostname)
|
||||
api.h.nodeNotifier.NotifyWithIgnore(ctx, stateUpdate, node.MachineKey.String())
|
||||
}
|
||||
}, node.ID)
|
||||
|
||||
log.Trace().
|
||||
Str("node", node.Hostname).
|
||||
|
@ -281,13 +278,13 @@ func (api headscaleV1APIServer) SetTags(
|
|||
|
||||
func validateTag(tag string) error {
|
||||
if strings.Index(tag, "tag:") != 0 {
|
||||
return fmt.Errorf("tag must start with the string 'tag:'")
|
||||
return errors.New("tag must start with the string 'tag:'")
|
||||
}
|
||||
if strings.ToLower(tag) != tag {
|
||||
return fmt.Errorf("tag should be lowercase")
|
||||
return errors.New("tag should be lowercase")
|
||||
}
|
||||
if len(strings.Fields(tag)) > 1 {
|
||||
return fmt.Errorf("tag should not contains space")
|
||||
return errors.New("tag should not contains space")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -296,26 +293,30 @@ func (api headscaleV1APIServer) DeleteNode(
|
|||
ctx context.Context,
|
||||
request *v1.DeleteNodeRequest,
|
||||
) (*v1.DeleteNodeResponse, error) {
|
||||
node, err := api.h.db.GetNodeByID(request.GetNodeId())
|
||||
node, err := api.h.db.GetNodeByID(types.NodeID(request.GetNodeId()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = api.h.db.DeleteNode(
|
||||
changedNodes, err := api.h.db.DeleteNode(
|
||||
node,
|
||||
api.h.nodeNotifier.ConnectedMap(),
|
||||
api.h.nodeNotifier.LikelyConnectedMap(),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
stateUpdate := types.StateUpdate{
|
||||
ctx = types.NotifyCtx(ctx, "cli-deletenode", node.Hostname)
|
||||
api.h.nodeNotifier.NotifyAll(ctx, types.StateUpdate{
|
||||
Type: types.StatePeerRemoved,
|
||||
Removed: []tailcfg.NodeID{tailcfg.NodeID(node.ID)},
|
||||
}
|
||||
if stateUpdate.Valid() {
|
||||
ctx := types.NotifyCtx(ctx, "cli-deletenode", node.Hostname)
|
||||
api.h.nodeNotifier.NotifyAll(ctx, stateUpdate)
|
||||
Removed: []types.NodeID{node.ID},
|
||||
})
|
||||
|
||||
if changedNodes != nil {
|
||||
api.h.nodeNotifier.NotifyAll(ctx, types.StateUpdate{
|
||||
Type: types.StatePeerChanged,
|
||||
ChangeNodes: changedNodes,
|
||||
})
|
||||
}
|
||||
|
||||
return &v1.DeleteNodeResponse{}, nil
|
||||
|
@ -330,33 +331,27 @@ func (api headscaleV1APIServer) ExpireNode(
|
|||
node, err := db.Write(api.h.db.DB, func(tx *gorm.DB) (*types.Node, error) {
|
||||
db.NodeSetExpiry(
|
||||
tx,
|
||||
request.GetNodeId(),
|
||||
types.NodeID(request.GetNodeId()),
|
||||
now,
|
||||
)
|
||||
|
||||
return db.GetNodeByID(tx, request.GetNodeId())
|
||||
return db.GetNodeByID(tx, types.NodeID(request.GetNodeId()))
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
selfUpdate := types.StateUpdate{
|
||||
Type: types.StateSelfUpdate,
|
||||
ChangeNodes: types.Nodes{node},
|
||||
}
|
||||
if selfUpdate.Valid() {
|
||||
ctx := types.NotifyCtx(ctx, "cli-expirenode-self", node.Hostname)
|
||||
api.h.nodeNotifier.NotifyByMachineKey(
|
||||
ctx,
|
||||
selfUpdate,
|
||||
node.MachineKey)
|
||||
}
|
||||
ctx = types.NotifyCtx(ctx, "cli-expirenode-self", node.Hostname)
|
||||
api.h.nodeNotifier.NotifyByNodeID(
|
||||
ctx,
|
||||
types.StateUpdate{
|
||||
Type: types.StateSelfUpdate,
|
||||
ChangeNodes: []types.NodeID{node.ID},
|
||||
},
|
||||
node.ID)
|
||||
|
||||
stateUpdate := types.StateUpdateExpire(node.ID, now)
|
||||
if stateUpdate.Valid() {
|
||||
ctx := types.NotifyCtx(ctx, "cli-expirenode-peers", node.Hostname)
|
||||
api.h.nodeNotifier.NotifyWithIgnore(ctx, stateUpdate, node.MachineKey.String())
|
||||
}
|
||||
ctx = types.NotifyCtx(ctx, "cli-expirenode-peers", node.Hostname)
|
||||
api.h.nodeNotifier.NotifyWithIgnore(ctx, types.StateUpdateExpire(node.ID, now), node.ID)
|
||||
|
||||
log.Trace().
|
||||
Str("node", node.Hostname).
|
||||
|
@ -380,21 +375,18 @@ func (api headscaleV1APIServer) RenameNode(
|
|||
return nil, err
|
||||
}
|
||||
|
||||
return db.GetNodeByID(tx, request.GetNodeId())
|
||||
return db.GetNodeByID(tx, types.NodeID(request.GetNodeId()))
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
stateUpdate := types.StateUpdate{
|
||||
ctx = types.NotifyCtx(ctx, "cli-renamenode", node.Hostname)
|
||||
api.h.nodeNotifier.NotifyWithIgnore(ctx, types.StateUpdate{
|
||||
Type: types.StatePeerChanged,
|
||||
ChangeNodes: types.Nodes{node},
|
||||
ChangeNodes: []types.NodeID{node.ID},
|
||||
Message: "called from api.RenameNode",
|
||||
}
|
||||
if stateUpdate.Valid() {
|
||||
ctx := types.NotifyCtx(ctx, "cli-renamenode", node.Hostname)
|
||||
api.h.nodeNotifier.NotifyWithIgnore(ctx, stateUpdate, node.MachineKey.String())
|
||||
}
|
||||
}, node.ID)
|
||||
|
||||
log.Trace().
|
||||
Str("node", node.Hostname).
|
||||
|
@ -408,7 +400,7 @@ func (api headscaleV1APIServer) ListNodes(
|
|||
ctx context.Context,
|
||||
request *v1.ListNodesRequest,
|
||||
) (*v1.ListNodesResponse, error) {
|
||||
isConnected := api.h.nodeNotifier.ConnectedMap()
|
||||
isLikelyConnected := api.h.nodeNotifier.LikelyConnectedMap()
|
||||
if request.GetUser() != "" {
|
||||
nodes, err := db.Read(api.h.db.DB, func(rx *gorm.DB) (types.Nodes, error) {
|
||||
return db.ListNodesByUser(rx, request.GetUser())
|
||||
|
@ -423,7 +415,9 @@ func (api headscaleV1APIServer) ListNodes(
|
|||
|
||||
// Populate the online field based on
|
||||
// currently connected nodes.
|
||||
resp.Online = isConnected[node.MachineKey]
|
||||
if val, ok := isLikelyConnected.Load(node.ID); ok && val {
|
||||
resp.Online = true
|
||||
}
|
||||
|
||||
response[index] = resp
|
||||
}
|
||||
|
@ -446,7 +440,9 @@ func (api headscaleV1APIServer) ListNodes(
|
|||
|
||||
// Populate the online field based on
|
||||
// currently connected nodes.
|
||||
resp.Online = isConnected[node.MachineKey]
|
||||
if val, ok := isLikelyConnected.Load(node.ID); ok && val {
|
||||
resp.Online = true
|
||||
}
|
||||
|
||||
validTags, invalidTags := api.h.ACLPolicy.TagsOfNode(
|
||||
node,
|
||||
|
@ -463,7 +459,7 @@ func (api headscaleV1APIServer) MoveNode(
|
|||
ctx context.Context,
|
||||
request *v1.MoveNodeRequest,
|
||||
) (*v1.MoveNodeResponse, error) {
|
||||
node, err := api.h.db.GetNodeByID(request.GetNodeId())
|
||||
node, err := api.h.db.GetNodeByID(types.NodeID(request.GetNodeId()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -476,6 +472,24 @@ func (api headscaleV1APIServer) MoveNode(
|
|||
return &v1.MoveNodeResponse{Node: node.Proto()}, nil
|
||||
}
|
||||
|
||||
func (api headscaleV1APIServer) BackfillNodeIPs(
|
||||
ctx context.Context,
|
||||
request *v1.BackfillNodeIPsRequest,
|
||||
) (*v1.BackfillNodeIPsResponse, error) {
|
||||
log.Trace().Msg("Backfill called")
|
||||
|
||||
if !request.Confirmed {
|
||||
return nil, errors.New("not confirmed, aborting")
|
||||
}
|
||||
|
||||
changes, err := api.h.db.BackfillNodeIPs(api.h.ipAlloc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &v1.BackfillNodeIPsResponse{Changes: changes}, nil
|
||||
}
|
||||
|
||||
func (api headscaleV1APIServer) GetRoutes(
|
||||
ctx context.Context,
|
||||
request *v1.GetRoutesRequest,
|
||||
|
@ -503,7 +517,7 @@ func (api headscaleV1APIServer) EnableRoute(
|
|||
return nil, err
|
||||
}
|
||||
|
||||
if update != nil && update.Valid() {
|
||||
if update != nil {
|
||||
ctx := types.NotifyCtx(ctx, "cli-enableroute", "unknown")
|
||||
api.h.nodeNotifier.NotifyAll(
|
||||
ctx, *update)
|
||||
|
@ -516,17 +530,19 @@ func (api headscaleV1APIServer) DisableRoute(
|
|||
ctx context.Context,
|
||||
request *v1.DisableRouteRequest,
|
||||
) (*v1.DisableRouteResponse, error) {
|
||||
isConnected := api.h.nodeNotifier.ConnectedMap()
|
||||
update, err := db.Write(api.h.db.DB, func(tx *gorm.DB) (*types.StateUpdate, error) {
|
||||
return db.DisableRoute(tx, request.GetRouteId(), isConnected)
|
||||
update, err := db.Write(api.h.db.DB, func(tx *gorm.DB) ([]types.NodeID, error) {
|
||||
return db.DisableRoute(tx, request.GetRouteId(), api.h.nodeNotifier.LikelyConnectedMap())
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if update != nil && update.Valid() {
|
||||
if update != nil {
|
||||
ctx := types.NotifyCtx(ctx, "cli-disableroute", "unknown")
|
||||
api.h.nodeNotifier.NotifyAll(ctx, *update)
|
||||
api.h.nodeNotifier.NotifyAll(ctx, types.StateUpdate{
|
||||
Type: types.StatePeerChanged,
|
||||
ChangeNodes: update,
|
||||
})
|
||||
}
|
||||
|
||||
return &v1.DisableRouteResponse{}, nil
|
||||
|
@ -536,7 +552,7 @@ func (api headscaleV1APIServer) GetNodeRoutes(
|
|||
ctx context.Context,
|
||||
request *v1.GetNodeRoutesRequest,
|
||||
) (*v1.GetNodeRoutesResponse, error) {
|
||||
node, err := api.h.db.GetNodeByID(request.GetNodeId())
|
||||
node, err := api.h.db.GetNodeByID(types.NodeID(request.GetNodeId()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -555,17 +571,20 @@ func (api headscaleV1APIServer) DeleteRoute(
|
|||
ctx context.Context,
|
||||
request *v1.DeleteRouteRequest,
|
||||
) (*v1.DeleteRouteResponse, error) {
|
||||
isConnected := api.h.nodeNotifier.ConnectedMap()
|
||||
update, err := db.Write(api.h.db.DB, func(tx *gorm.DB) (*types.StateUpdate, error) {
|
||||
isConnected := api.h.nodeNotifier.LikelyConnectedMap()
|
||||
update, err := db.Write(api.h.db.DB, func(tx *gorm.DB) ([]types.NodeID, error) {
|
||||
return db.DeleteRoute(tx, request.GetRouteId(), isConnected)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if update != nil && update.Valid() {
|
||||
if update != nil {
|
||||
ctx := types.NotifyCtx(ctx, "cli-deleteroute", "unknown")
|
||||
api.h.nodeNotifier.NotifyWithIgnore(ctx, *update)
|
||||
api.h.nodeNotifier.NotifyAll(ctx, types.StateUpdate{
|
||||
Type: types.StatePeerChanged,
|
||||
ChangeNodes: update,
|
||||
})
|
||||
}
|
||||
|
||||
return &v1.DeleteRouteResponse{}, nil
|
||||
|
|
|
@ -68,12 +68,6 @@ func (h *Headscale) KeyHandler(
|
|||
Msg("could not get capability version")
|
||||
writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
|
||||
writer.WriteHeader(http.StatusInternalServerError)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
@ -82,19 +76,6 @@ func (h *Headscale) KeyHandler(
|
|||
Str("handler", "/key").
|
||||
Int("cap_ver", int(capVer)).
|
||||
Msg("New noise client")
|
||||
if err != nil {
|
||||
writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
|
||||
writer.WriteHeader(http.StatusBadRequest)
|
||||
_, err := writer.Write([]byte("Wrong params"))
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// TS2021 (Tailscale v2 protocol) requires to have a different key
|
||||
if capVer >= NoiseCapabilityVersion {
|
||||
|
|
|
@ -16,12 +16,13 @@ import (
|
|||
"time"
|
||||
|
||||
mapset "github.com/deckarep/golang-set/v2"
|
||||
"github.com/juanfont/headscale/hscontrol/db"
|
||||
"github.com/juanfont/headscale/hscontrol/notifier"
|
||||
"github.com/juanfont/headscale/hscontrol/policy"
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/juanfont/headscale/hscontrol/util"
|
||||
"github.com/klauspost/compress/zstd"
|
||||
"github.com/rs/zerolog/log"
|
||||
"golang.org/x/exp/maps"
|
||||
"tailscale.com/envknob"
|
||||
"tailscale.com/smallzstd"
|
||||
"tailscale.com/tailcfg"
|
||||
|
@ -51,21 +52,14 @@ var debugDumpMapResponsePath = envknob.String("HEADSCALE_DEBUG_DUMP_MAPRESPONSE_
|
|||
type Mapper struct {
|
||||
// Configuration
|
||||
// TODO(kradalby): figure out if this is the format we want this in
|
||||
derpMap *tailcfg.DERPMap
|
||||
baseDomain string
|
||||
dnsCfg *tailcfg.DNSConfig
|
||||
logtail bool
|
||||
randomClientPort bool
|
||||
db *db.HSDatabase
|
||||
cfg *types.Config
|
||||
derpMap *tailcfg.DERPMap
|
||||
notif *notifier.Notifier
|
||||
|
||||
uid string
|
||||
created time.Time
|
||||
seq uint64
|
||||
|
||||
// Map isnt concurrency safe, so we need to ensure
|
||||
// only one func is accessing it over time.
|
||||
mu sync.Mutex
|
||||
peers map[uint64]*types.Node
|
||||
patches map[uint64][]patch
|
||||
}
|
||||
|
||||
type patch struct {
|
||||
|
@ -74,35 +68,22 @@ type patch struct {
|
|||
}
|
||||
|
||||
func NewMapper(
|
||||
node *types.Node,
|
||||
peers types.Nodes,
|
||||
db *db.HSDatabase,
|
||||
cfg *types.Config,
|
||||
derpMap *tailcfg.DERPMap,
|
||||
baseDomain string,
|
||||
dnsCfg *tailcfg.DNSConfig,
|
||||
logtail bool,
|
||||
randomClientPort bool,
|
||||
notif *notifier.Notifier,
|
||||
) *Mapper {
|
||||
log.Debug().
|
||||
Caller().
|
||||
Str("node", node.Hostname).
|
||||
Msg("creating new mapper")
|
||||
|
||||
uid, _ := util.GenerateRandomStringDNSSafe(mapperIDLength)
|
||||
|
||||
return &Mapper{
|
||||
derpMap: derpMap,
|
||||
baseDomain: baseDomain,
|
||||
dnsCfg: dnsCfg,
|
||||
logtail: logtail,
|
||||
randomClientPort: randomClientPort,
|
||||
db: db,
|
||||
cfg: cfg,
|
||||
derpMap: derpMap,
|
||||
notif: notif,
|
||||
|
||||
uid: uid,
|
||||
created: time.Now(),
|
||||
seq: 0,
|
||||
|
||||
// TODO: populate
|
||||
peers: peers.IDMap(),
|
||||
patches: make(map[uint64][]patch),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -194,8 +175,8 @@ func addNextDNSMetadata(resolvers []*dnstype.Resolver, node *types.Node) {
|
|||
"device_model": []string{node.Hostinfo.OS},
|
||||
}
|
||||
|
||||
if len(node.IPAddresses) > 0 {
|
||||
attrs.Add("device_ip", node.IPAddresses[0].String())
|
||||
if len(node.IPs()) > 0 {
|
||||
attrs.Add("device_ip", node.IPs()[0].String())
|
||||
}
|
||||
|
||||
resolver.Addr = fmt.Sprintf("%s?%s", resolver.Addr, attrs.Encode())
|
||||
|
@ -207,11 +188,10 @@ func addNextDNSMetadata(resolvers []*dnstype.Resolver, node *types.Node) {
|
|||
// It is a separate function to make testing easier.
|
||||
func (m *Mapper) fullMapResponse(
|
||||
node *types.Node,
|
||||
peers types.Nodes,
|
||||
pol *policy.ACLPolicy,
|
||||
capVer tailcfg.CapabilityVersion,
|
||||
) (*tailcfg.MapResponse, error) {
|
||||
peers := nodeMapToList(m.peers)
|
||||
|
||||
resp, err := m.baseWithConfigMapResponse(node, pol, capVer)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -219,14 +199,13 @@ func (m *Mapper) fullMapResponse(
|
|||
|
||||
err = appendPeerChanges(
|
||||
resp,
|
||||
true, // full change
|
||||
pol,
|
||||
node,
|
||||
capVer,
|
||||
peers,
|
||||
peers,
|
||||
m.baseDomain,
|
||||
m.dnsCfg,
|
||||
m.randomClientPort,
|
||||
m.cfg,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -240,35 +219,25 @@ func (m *Mapper) FullMapResponse(
|
|||
mapRequest tailcfg.MapRequest,
|
||||
node *types.Node,
|
||||
pol *policy.ACLPolicy,
|
||||
messages ...string,
|
||||
) ([]byte, error) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
peers := maps.Keys(m.peers)
|
||||
peersWithPatches := maps.Keys(m.patches)
|
||||
slices.Sort(peers)
|
||||
slices.Sort(peersWithPatches)
|
||||
|
||||
if len(peersWithPatches) > 0 {
|
||||
log.Debug().
|
||||
Str("node", node.Hostname).
|
||||
Uints64("peers", peers).
|
||||
Uints64("pending_patches", peersWithPatches).
|
||||
Msgf("node requested full map response, but has pending patches")
|
||||
}
|
||||
|
||||
resp, err := m.fullMapResponse(node, pol, mapRequest.Version)
|
||||
peers, err := m.ListPeers(node.ID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return m.marshalMapResponse(mapRequest, resp, node, mapRequest.Compress)
|
||||
resp, err := m.fullMapResponse(node, peers, pol, mapRequest.Version)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return m.marshalMapResponse(mapRequest, resp, node, mapRequest.Compress, messages...)
|
||||
}
|
||||
|
||||
// LiteMapResponse returns a MapResponse for the given node.
|
||||
// ReadOnlyResponse returns a MapResponse for the given node.
|
||||
// Lite means that the peers has been omitted, this is intended
|
||||
// to be used to answer MapRequests with OmitPeers set to true.
|
||||
func (m *Mapper) LiteMapResponse(
|
||||
func (m *Mapper) ReadOnlyMapResponse(
|
||||
mapRequest tailcfg.MapRequest,
|
||||
node *types.Node,
|
||||
pol *policy.ACLPolicy,
|
||||
|
@ -279,18 +248,6 @@ func (m *Mapper) LiteMapResponse(
|
|||
return nil, err
|
||||
}
|
||||
|
||||
rules, sshPolicy, err := policy.GenerateFilterAndSSHRules(
|
||||
pol,
|
||||
node,
|
||||
nodeMapToList(m.peers),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp.PacketFilter = policy.ReduceFilterRules(node, rules)
|
||||
resp.SSHPolicy = sshPolicy
|
||||
|
||||
return m.marshalMapResponse(mapRequest, resp, node, mapRequest.Compress, messages...)
|
||||
}
|
||||
|
||||
|
@ -320,50 +277,74 @@ func (m *Mapper) DERPMapResponse(
|
|||
func (m *Mapper) PeerChangedResponse(
|
||||
mapRequest tailcfg.MapRequest,
|
||||
node *types.Node,
|
||||
changed types.Nodes,
|
||||
changed map[types.NodeID]bool,
|
||||
patches []*tailcfg.PeerChange,
|
||||
pol *policy.ACLPolicy,
|
||||
messages ...string,
|
||||
) ([]byte, error) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
// Update our internal map.
|
||||
for _, node := range changed {
|
||||
if patches, ok := m.patches[node.ID]; ok {
|
||||
// preserve online status in case the patch has an outdated one
|
||||
online := node.IsOnline
|
||||
|
||||
for _, p := range patches {
|
||||
// TODO(kradalby): Figure if this needs to be sorted by timestamp
|
||||
node.ApplyPeerChange(p.change)
|
||||
}
|
||||
|
||||
// Ensure the patches are not applied again later
|
||||
delete(m.patches, node.ID)
|
||||
|
||||
node.IsOnline = online
|
||||
}
|
||||
|
||||
m.peers[node.ID] = node
|
||||
}
|
||||
|
||||
resp := m.baseMapResponse()
|
||||
|
||||
err := appendPeerChanges(
|
||||
peers, err := m.ListPeers(node.ID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var removedIDs []tailcfg.NodeID
|
||||
var changedIDs []types.NodeID
|
||||
for nodeID, nodeChanged := range changed {
|
||||
if nodeChanged {
|
||||
changedIDs = append(changedIDs, nodeID)
|
||||
} else {
|
||||
removedIDs = append(removedIDs, nodeID.NodeID())
|
||||
}
|
||||
}
|
||||
|
||||
changedNodes := make(types.Nodes, 0, len(changedIDs))
|
||||
for _, peer := range peers {
|
||||
if slices.Contains(changedIDs, peer.ID) {
|
||||
changedNodes = append(changedNodes, peer)
|
||||
}
|
||||
}
|
||||
|
||||
err = appendPeerChanges(
|
||||
&resp,
|
||||
false, // partial change
|
||||
pol,
|
||||
node,
|
||||
mapRequest.Version,
|
||||
nodeMapToList(m.peers),
|
||||
changed,
|
||||
m.baseDomain,
|
||||
m.dnsCfg,
|
||||
m.randomClientPort,
|
||||
peers,
|
||||
changedNodes,
|
||||
m.cfg,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp.PeersRemoved = removedIDs
|
||||
|
||||
// Sending patches as a part of a PeersChanged response
|
||||
// is technically not suppose to be done, but they are
|
||||
// applied after the PeersChanged. The patch list
|
||||
// should _only_ contain Nodes that are not in the
|
||||
// PeersChanged or PeersRemoved list and the caller
|
||||
// should filter them out.
|
||||
//
|
||||
// From tailcfg docs:
|
||||
// These are applied after Peers* above, but in practice the
|
||||
// control server should only send these on their own, without
|
||||
// the Peers* fields also set.
|
||||
if patches != nil {
|
||||
resp.PeersChangedPatch = patches
|
||||
}
|
||||
|
||||
// Add the node itself, it might have changed, and particularly
|
||||
// if there are no patches or changes, this is a self update.
|
||||
tailnode, err := tailNode(node, mapRequest.Version, pol, m.cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp.Node = tailnode
|
||||
|
||||
return m.marshalMapResponse(mapRequest, &resp, node, mapRequest.Compress, messages...)
|
||||
}
|
||||
|
||||
|
@@ -375,71 +356,12 @@ func (m *Mapper) PeerChangedPatchResponse(
changed []*tailcfg.PeerChange,
pol *policy.ACLPolicy,
) ([]byte, error) {
m.mu.Lock()
defer m.mu.Unlock()

sendUpdate := false
// patch the internal map
for _, change := range changed {
if peer, ok := m.peers[uint64(change.NodeID)]; ok {
peer.ApplyPeerChange(change)
sendUpdate = true
} else {
log.Trace().Str("node", node.Hostname).Msgf("Node with ID %s is missing from mapper for Node %s, saving patch for when node is available", change.NodeID, node.Hostname)

p := patch{
timestamp: time.Now(),
change: change,
}

if patches, ok := m.patches[uint64(change.NodeID)]; ok {
m.patches[uint64(change.NodeID)] = append(patches, p)
} else {
m.patches[uint64(change.NodeID)] = []patch{p}
}
}
}

if !sendUpdate {
return nil, nil
}

resp := m.baseMapResponse()
resp.PeersChangedPatch = changed

return m.marshalMapResponse(mapRequest, &resp, node, mapRequest.Compress)
}
// TODO(kradalby): We need some integration tests for this.
func (m *Mapper) PeerRemovedResponse(
mapRequest tailcfg.MapRequest,
node *types.Node,
removed []tailcfg.NodeID,
) ([]byte, error) {
m.mu.Lock()
defer m.mu.Unlock()

// Some nodes might have been removed already
// so we don't want to ask downstream to remove
// twice, as that can cause a panic in tailscaled.
notYetRemoved := []tailcfg.NodeID{}

// remove from our internal map
for _, id := range removed {
if _, ok := m.peers[uint64(id)]; ok {
notYetRemoved = append(notYetRemoved, id)
}

delete(m.peers, uint64(id))
delete(m.patches, uint64(id))
}

resp := m.baseMapResponse()
resp.PeersRemoved = notYetRemoved

return m.marshalMapResponse(mapRequest, &resp, node, mapRequest.Compress)
}
func (m *Mapper) marshalMapResponse(
|
||||
mapRequest tailcfg.MapRequest,
|
||||
resp *tailcfg.MapResponse,
|
||||
|
@ -451,10 +373,7 @@ func (m *Mapper) marshalMapResponse(
|
|||
|
||||
jsonBody, err := json.Marshal(resp)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Cannot marshal map response")
|
||||
return nil, fmt.Errorf("marshalling map response: %w", err)
|
||||
}
|
||||
|
||||
if debugDumpMapResponsePath != "" {
|
||||
|
@ -469,10 +388,8 @@ func (m *Mapper) marshalMapResponse(
|
|||
switch {
|
||||
case resp.Peers != nil && len(resp.Peers) > 0:
|
||||
responseType = "full"
|
||||
case isSelfUpdate(messages...):
|
||||
case resp.Peers == nil && resp.PeersChanged == nil && resp.PeersChangedPatch == nil && resp.DERPMap == nil && !resp.KeepAlive:
|
||||
responseType = "self"
|
||||
case resp.Peers == nil && resp.PeersChanged == nil && resp.PeersChangedPatch == nil:
|
||||
responseType = "lite"
|
||||
case resp.PeersChanged != nil && len(resp.PeersChanged) > 0:
|
||||
responseType = "changed"
|
||||
case resp.PeersChangedPatch != nil && len(resp.PeersChangedPatch) > 0:
|
||||
|
@ -483,10 +400,7 @@ func (m *Mapper) marshalMapResponse(
|
|||
|
||||
body, err := json.MarshalIndent(data, "", " ")
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Cannot marshal map response")
|
||||
return nil, fmt.Errorf("marshalling map response: %w", err)
|
||||
}
|
||||
|
||||
perms := fs.FileMode(debugMapResponsePerm)
|
||||
|
@ -496,11 +410,11 @@ func (m *Mapper) marshalMapResponse(
|
|||
panic(err)
|
||||
}
|
||||
|
||||
now := time.Now().UnixNano()
|
||||
now := time.Now().Format("2006-01-02T15-04-05.999999999")
|
||||
|
||||
mapResponsePath := path.Join(
|
||||
mPath,
|
||||
fmt.Sprintf("%d-%s-%d-%s.json", now, m.uid, atomic.LoadUint64(&m.seq), responseType),
|
||||
fmt.Sprintf("%s-%s-%d-%s.json", now, m.uid, atomic.LoadUint64(&m.seq), responseType),
|
||||
)
|
||||
|
||||
log.Trace().Msgf("Writing MapResponse to %s", mapResponsePath)
|
||||
|
@ -574,7 +488,7 @@ func (m *Mapper) baseWithConfigMapResponse(
|
|||
) (*tailcfg.MapResponse, error) {
|
||||
resp := m.baseMapResponse()
|
||||
|
||||
tailnode, err := tailNode(node, capVer, pol, m.dnsCfg, m.baseDomain, m.randomClientPort)
|
||||
tailnode, err := tailNode(node, capVer, pol, m.cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -582,7 +496,7 @@ func (m *Mapper) baseWithConfigMapResponse(
|
|||
|
||||
resp.DERPMap = m.derpMap
|
||||
|
||||
resp.Domain = m.baseDomain
|
||||
resp.Domain = m.cfg.BaseDomain
|
||||
|
||||
// Do not instruct clients to collect services we do not
|
||||
// support or do anything with them
|
||||
|
@ -591,12 +505,26 @@ func (m *Mapper) baseWithConfigMapResponse(
|
|||
resp.KeepAlive = false
|
||||
|
||||
resp.Debug = &tailcfg.Debug{
|
||||
DisableLogTail: !m.logtail,
|
||||
DisableLogTail: !m.cfg.LogTail.Enabled,
|
||||
}
|
||||
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
func (m *Mapper) ListPeers(nodeID types.NodeID) (types.Nodes, error) {
|
||||
peers, err := m.db.ListPeers(nodeID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, peer := range peers {
|
||||
online := m.notif.IsLikelyConnected(peer.ID)
|
||||
peer.IsOnline = &online
|
||||
}
|
||||
|
||||
return peers, nil
|
||||
}
|
||||
|
||||
func nodeMapToList(nodes map[uint64]*types.Node) types.Nodes {
|
||||
ret := make(types.Nodes, 0)
|
||||
|
||||
|
@@ -612,42 +540,41 @@ func nodeMapToList(nodes map[uint64]*types.Node) types.Nodes {
func appendPeerChanges(
resp *tailcfg.MapResponse,

fullChange bool,
pol *policy.ACLPolicy,
node *types.Node,
capVer tailcfg.CapabilityVersion,
peers types.Nodes,
changed types.Nodes,
baseDomain string,
dnsCfg *tailcfg.DNSConfig,
randomClientPort bool,
cfg *types.Config,
) error {
fullChange := len(peers) == len(changed)

rules, sshPolicy, err := policy.GenerateFilterAndSSHRules(
pol,
node,
peers,
)
packetFilter, err := pol.CompileFilterRules(append(peers, node))
if err != nil {
return err
}

sshPolicy, err := pol.CompileSSHPolicy(node, peers)
if err != nil {
return err
}

// If there are filter rules present, see if there are any nodes that cannot
// access each other at all and remove them from the peers.
if len(rules) > 0 {
changed = policy.FilterNodesByACL(node, changed, rules)
if len(packetFilter) > 0 {
changed = policy.FilterNodesByACL(node, changed, packetFilter)
}

profiles := generateUserProfiles(node, changed, baseDomain)
profiles := generateUserProfiles(node, changed, cfg.BaseDomain)

dnsConfig := generateDNSConfig(
dnsCfg,
baseDomain,
cfg.DNSConfig,
cfg.BaseDomain,
node,
peers,
)

tailPeers, err := tailNodes(changed, capVer, pol, dnsCfg, baseDomain, randomClientPort)
tailPeers, err := tailNodes(changed, capVer, pol, cfg)
if err != nil {
return err
}

@@ -663,19 +590,9 @@ func appendPeerChanges(
resp.PeersChanged = tailPeers
}
resp.DNSConfig = dnsConfig
resp.PacketFilter = policy.ReduceFilterRules(node, rules)
resp.PacketFilter = policy.ReduceFilterRules(node, packetFilter)
resp.UserProfiles = profiles
resp.SSHPolicy = sshPolicy

return nil
}

func isSelfUpdate(messages ...string) bool {
for _, message := range messages {
if strings.Contains(message, types.SelfUpdateIdentifier) {
return true
}
}

return false
}
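// Editor's note: a small illustration (not part of this change) of how the
// messages variadic feeds isSelfUpdate above. Only types.SelfUpdateIdentifier is
// real; the surrounding message text and the function name are hypothetical.
func exampleSelfUpdateCheck() bool {
	messages := []string{
		"triggered by " + types.SelfUpdateIdentifier,
	}

	// A response built from these messages is classified as a "self"
	// response by marshalMapResponse.
	return isSelfUpdate(messages...)
}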
|
|
|
@ -17,6 +17,11 @@ import (
|
|||
"tailscale.com/types/key"
|
||||
)
|
||||
|
||||
var iap = func(ipStr string) *netip.Addr {
|
||||
ip := netip.MustParseAddr(ipStr)
|
||||
return &ip
|
||||
}
|
||||
|
||||
func (s *Suite) TestGetMapResponseUserProfiles(c *check.C) {
|
||||
mach := func(hostname, username string, userid uint) *types.Node {
|
||||
return &types.Node{
|
||||
|
@ -176,17 +181,16 @@ func Test_fullMapResponse(t *testing.T) {
|
|||
DiscoKey: mustDK(
|
||||
"discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
|
||||
),
|
||||
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.1")},
|
||||
Hostname: "mini",
|
||||
GivenName: "mini",
|
||||
UserID: 0,
|
||||
User: types.User{Name: "mini"},
|
||||
ForcedTags: []string{},
|
||||
AuthKeyID: 0,
|
||||
IPv4: iap("100.64.0.1"),
|
||||
Hostname: "mini",
|
||||
GivenName: "mini",
|
||||
UserID: 0,
|
||||
User: types.User{Name: "mini"},
|
||||
ForcedTags: []string{},
|
||||
AuthKey: &types.PreAuthKey{},
|
||||
LastSeen: &lastSeen,
|
||||
Expiry: &expire,
|
||||
Hostinfo: &tailcfg.Hostinfo{},
|
||||
Hostinfo: &tailcfg.Hostinfo{},
|
||||
Routes: []types.Route{
|
||||
{
|
||||
Prefix: types.IPPrefix(netip.MustParsePrefix("0.0.0.0/0")),
|
||||
|
@ -257,17 +261,17 @@ func Test_fullMapResponse(t *testing.T) {
|
|||
DiscoKey: mustDK(
|
||||
"discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
|
||||
),
|
||||
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.2")},
|
||||
Hostname: "peer1",
|
||||
GivenName: "peer1",
|
||||
UserID: 0,
|
||||
User: types.User{Name: "mini"},
|
||||
ForcedTags: []string{},
|
||||
LastSeen: &lastSeen,
|
||||
Expiry: &expire,
|
||||
Hostinfo: &tailcfg.Hostinfo{},
|
||||
Routes: []types.Route{},
|
||||
CreatedAt: created,
|
||||
IPv4: iap("100.64.0.2"),
|
||||
Hostname: "peer1",
|
||||
GivenName: "peer1",
|
||||
UserID: 0,
|
||||
User: types.User{Name: "mini"},
|
||||
ForcedTags: []string{},
|
||||
LastSeen: &lastSeen,
|
||||
Expiry: &expire,
|
||||
Hostinfo: &tailcfg.Hostinfo{},
|
||||
Routes: []types.Route{},
|
||||
CreatedAt: created,
|
||||
}
|
||||
|
||||
tailPeer1 := &tailcfg.Node{
|
||||
|
@ -312,17 +316,17 @@ func Test_fullMapResponse(t *testing.T) {
|
|||
DiscoKey: mustDK(
|
||||
"discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
|
||||
),
|
||||
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.3")},
|
||||
Hostname: "peer2",
|
||||
GivenName: "peer2",
|
||||
UserID: 1,
|
||||
User: types.User{Name: "peer2"},
|
||||
ForcedTags: []string{},
|
||||
LastSeen: &lastSeen,
|
||||
Expiry: &expire,
|
||||
Hostinfo: &tailcfg.Hostinfo{},
|
||||
Routes: []types.Route{},
|
||||
CreatedAt: created,
|
||||
IPv4: iap("100.64.0.3"),
|
||||
Hostname: "peer2",
|
||||
GivenName: "peer2",
|
||||
UserID: 1,
|
||||
User: types.User{Name: "peer2"},
|
||||
ForcedTags: []string{},
|
||||
LastSeen: &lastSeen,
|
||||
Expiry: &expire,
|
||||
Hostinfo: &tailcfg.Hostinfo{},
|
||||
Routes: []types.Route{},
|
||||
CreatedAt: created,
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
|
@ -331,13 +335,10 @@ func Test_fullMapResponse(t *testing.T) {
|
|||
node *types.Node
|
||||
peers types.Nodes
|
||||
|
||||
baseDomain string
|
||||
dnsConfig *tailcfg.DNSConfig
|
||||
derpMap *tailcfg.DERPMap
|
||||
logtail bool
|
||||
randomClientPort bool
|
||||
want *tailcfg.MapResponse
|
||||
wantErr bool
|
||||
derpMap *tailcfg.DERPMap
|
||||
cfg *types.Config
|
||||
want *tailcfg.MapResponse
|
||||
wantErr bool
|
||||
}{
|
||||
// {
|
||||
// name: "empty-node",
|
||||
|
@ -349,15 +350,17 @@ func Test_fullMapResponse(t *testing.T) {
|
|||
// wantErr: true,
|
||||
// },
|
||||
{
|
||||
name: "no-pol-no-peers-map-response",
|
||||
pol: &policy.ACLPolicy{},
|
||||
node: mini,
|
||||
peers: types.Nodes{},
|
||||
baseDomain: "",
|
||||
dnsConfig: &tailcfg.DNSConfig{},
|
||||
derpMap: &tailcfg.DERPMap{},
|
||||
logtail: false,
|
||||
randomClientPort: false,
|
||||
name: "no-pol-no-peers-map-response",
|
||||
pol: &policy.ACLPolicy{},
|
||||
node: mini,
|
||||
peers: types.Nodes{},
|
||||
derpMap: &tailcfg.DERPMap{},
|
||||
cfg: &types.Config{
|
||||
BaseDomain: "",
|
||||
DNSConfig: &tailcfg.DNSConfig{},
|
||||
LogTail: types.LogTailConfig{Enabled: false},
|
||||
RandomizeClientPort: false,
|
||||
},
|
||||
want: &tailcfg.MapResponse{
|
||||
Node: tailMini,
|
||||
KeepAlive: false,
|
||||
|
@ -383,11 +386,13 @@ func Test_fullMapResponse(t *testing.T) {
|
|||
peers: types.Nodes{
|
||||
peer1,
|
||||
},
|
||||
baseDomain: "",
|
||||
dnsConfig: &tailcfg.DNSConfig{},
|
||||
derpMap: &tailcfg.DERPMap{},
|
||||
logtail: false,
|
||||
randomClientPort: false,
|
||||
derpMap: &tailcfg.DERPMap{},
|
||||
cfg: &types.Config{
|
||||
BaseDomain: "",
|
||||
DNSConfig: &tailcfg.DNSConfig{},
|
||||
LogTail: types.LogTailConfig{Enabled: false},
|
||||
RandomizeClientPort: false,
|
||||
},
|
||||
want: &tailcfg.MapResponse{
|
||||
KeepAlive: false,
|
||||
Node: tailMini,
|
||||
|
@ -424,11 +429,13 @@ func Test_fullMapResponse(t *testing.T) {
|
|||
peer1,
|
||||
peer2,
|
||||
},
|
||||
baseDomain: "",
|
||||
dnsConfig: &tailcfg.DNSConfig{},
|
||||
derpMap: &tailcfg.DERPMap{},
|
||||
logtail: false,
|
||||
randomClientPort: false,
|
||||
derpMap: &tailcfg.DERPMap{},
|
||||
cfg: &types.Config{
|
||||
BaseDomain: "",
|
||||
DNSConfig: &tailcfg.DNSConfig{},
|
||||
LogTail: types.LogTailConfig{Enabled: false},
|
||||
RandomizeClientPort: false,
|
||||
},
|
||||
want: &tailcfg.MapResponse{
|
||||
KeepAlive: false,
|
||||
Node: tailMini,
|
||||
|
@ -463,17 +470,15 @@ func Test_fullMapResponse(t *testing.T) {
|
|||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
mappy := NewMapper(
|
||||
tt.node,
|
||||
tt.peers,
|
||||
nil,
|
||||
tt.cfg,
|
||||
tt.derpMap,
|
||||
tt.baseDomain,
|
||||
tt.dnsConfig,
|
||||
tt.logtail,
|
||||
tt.randomClientPort,
|
||||
nil,
|
||||
)
|
||||
|
||||
got, err := mappy.fullMapResponse(
|
||||
tt.node,
|
||||
tt.peers,
|
||||
tt.pol,
|
||||
0,
|
||||
)
|
||||
|
|
|
@ -3,12 +3,10 @@ package mapper
|
|||
import (
|
||||
"fmt"
|
||||
"net/netip"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/juanfont/headscale/hscontrol/policy"
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/juanfont/headscale/hscontrol/util"
|
||||
"github.com/samber/lo"
|
||||
"tailscale.com/tailcfg"
|
||||
)
|
||||
|
@ -17,9 +15,7 @@ func tailNodes(
|
|||
nodes types.Nodes,
|
||||
capVer tailcfg.CapabilityVersion,
|
||||
pol *policy.ACLPolicy,
|
||||
dnsConfig *tailcfg.DNSConfig,
|
||||
baseDomain string,
|
||||
randomClientPort bool,
|
||||
cfg *types.Config,
|
||||
) ([]*tailcfg.Node, error) {
|
||||
tNodes := make([]*tailcfg.Node, len(nodes))
|
||||
|
||||
|
@ -28,9 +24,7 @@ func tailNodes(
|
|||
node,
|
||||
capVer,
|
||||
pol,
|
||||
dnsConfig,
|
||||
baseDomain,
|
||||
randomClientPort,
|
||||
cfg,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -48,11 +42,9 @@ func tailNode(
|
|||
node *types.Node,
|
||||
capVer tailcfg.CapabilityVersion,
|
||||
pol *policy.ACLPolicy,
|
||||
dnsConfig *tailcfg.DNSConfig,
|
||||
baseDomain string,
|
||||
randomClientPort bool,
|
||||
cfg *types.Config,
|
||||
) (*tailcfg.Node, error) {
|
||||
addrs := node.IPAddresses.Prefixes()
|
||||
addrs := node.Prefixes()
|
||||
|
||||
allowedIPs := append(
|
||||
[]netip.Prefix{},
|
||||
|
@ -85,7 +77,7 @@ func tailNode(
|
|||
keyExpiry = time.Time{}
|
||||
}
|
||||
|
||||
hostname, err := node.GetFQDN(dnsConfig, baseDomain)
|
||||
hostname, err := node.GetFQDN(cfg.DNSConfig, cfg.BaseDomain)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("tailNode, failed to create FQDN: %s", err)
|
||||
}
|
||||
|
@ -94,12 +86,10 @@ func tailNode(
|
|||
tags = lo.Uniq(append(tags, node.ForcedTags...))
|
||||
|
||||
tNode := tailcfg.Node{
|
||||
ID: tailcfg.NodeID(node.ID), // this is the actual ID
|
||||
StableID: tailcfg.StableNodeID(
|
||||
strconv.FormatUint(node.ID, util.Base10),
|
||||
), // in headscale, unlike tailcontrol server, IDs are permanent
|
||||
Name: hostname,
|
||||
Cap: capVer,
|
||||
ID: tailcfg.NodeID(node.ID), // this is the actual ID
|
||||
StableID: node.ID.StableID(),
|
||||
Name: hostname,
|
||||
Cap: capVer,
|
||||
|
||||
User: tailcfg.UserID(node.UserID),
|
||||
|
||||
|
@ -133,7 +123,7 @@ func tailNode(
|
|||
tailcfg.CapabilitySSH: []tailcfg.RawMessage{},
|
||||
}
|
||||
|
||||
if randomClientPort {
|
||||
if cfg.RandomizeClientPort {
|
||||
tNode.CapMap[tailcfg.NodeAttrRandomizeClientPort] = []tailcfg.RawMessage{}
|
||||
}
|
||||
} else {
|
||||
|
@ -143,7 +133,7 @@ func tailNode(
|
|||
tailcfg.CapabilitySSH,
|
||||
}
|
||||
|
||||
if randomClientPort {
|
||||
if cfg.RandomizeClientPort {
|
||||
tNode.Capabilities = append(tNode.Capabilities, tailcfg.NodeAttrRandomizeClientPort)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -89,9 +89,7 @@ func TestTailNode(t *testing.T) {
|
|||
DiscoKey: mustDK(
|
||||
"discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
|
||||
),
|
||||
IPAddresses: []netip.Addr{
|
||||
netip.MustParseAddr("100.64.0.1"),
|
||||
},
|
||||
IPv4: iap("100.64.0.1"),
|
||||
Hostname: "mini",
|
||||
GivenName: "mini",
|
||||
UserID: 0,
|
||||
|
@ -99,7 +97,6 @@ func TestTailNode(t *testing.T) {
|
|||
Name: "mini",
|
||||
},
|
||||
ForcedTags: []string{},
|
||||
AuthKeyID: 0,
|
||||
AuthKey: &types.PreAuthKey{},
|
||||
LastSeen: &lastSeen,
|
||||
Expiry: &expire,
|
||||
|
@ -182,13 +179,16 @@ func TestTailNode(t *testing.T) {
|
|||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cfg := &types.Config{
|
||||
BaseDomain: tt.baseDomain,
|
||||
DNSConfig: tt.dnsConfig,
|
||||
RandomizeClientPort: false,
|
||||
}
|
||||
got, err := tailNode(
|
||||
tt.node,
|
||||
0,
|
||||
tt.pol,
|
||||
tt.dnsConfig,
|
||||
tt.baseDomain,
|
||||
false,
|
||||
cfg,
|
||||
)
|
||||
|
||||
if (err != nil) != tt.wantErr {
|
||||
|
|
|
@ -1,25 +1,120 @@
|
|||
package hscontrol
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strconv"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"tailscale.com/envknob"
|
||||
)
|
||||
|
||||
var debugHighCardinalityMetrics = envknob.Bool("HEADSCALE_DEBUG_HIGH_CARDINALITY_METRICS")
|
||||
|
||||
var mapResponseLastSentSeconds *prometheus.GaugeVec
|
||||
|
||||
func init() {
|
||||
if debugHighCardinalityMetrics {
|
||||
mapResponseLastSentSeconds = promauto.NewGaugeVec(prometheus.GaugeOpts{
|
||||
Namespace: prometheusNamespace,
|
||||
Name: "mapresponse_last_sent_seconds",
|
||||
Help: "last sent metric to node.id",
|
||||
}, []string{"type", "id"})
|
||||
}
|
||||
}
|
||||
|
||||
const prometheusNamespace = "headscale"
|
||||
|
||||
var (
|
||||
// This is a high cardinality metric (user x node), we might want to make this
|
||||
// configurable/opt-in in the future.
|
||||
nodeRegistrations = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
mapResponseSent = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Namespace: prometheusNamespace,
|
||||
Name: "node_registrations_total",
|
||||
Help: "The total amount of registered node attempts",
|
||||
}, []string{"action", "auth", "status", "user"})
|
||||
|
||||
updateRequestsSentToNode = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "mapresponse_sent_total",
|
||||
Help: "total count of mapresponses sent to clients",
|
||||
}, []string{"status", "type"})
|
||||
mapResponseUpdateReceived = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Namespace: prometheusNamespace,
|
||||
Name: "update_request_sent_to_node_total",
|
||||
Help: "The number of calls/messages issued on a specific nodes update channel",
|
||||
}, []string{"user", "node", "status"})
|
||||
// TODO(kradalby): This is very debugging, we might want to remove it.
|
||||
Name: "mapresponse_updates_received_total",
|
||||
Help: "total count of mapresponse updates received on update channel",
|
||||
}, []string{"type"})
|
||||
mapResponseWriteUpdatesInStream = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Namespace: prometheusNamespace,
|
||||
Name: "mapresponse_write_updates_in_stream_total",
|
||||
Help: "total count of writes that occured in a stream session, pre-68 nodes",
|
||||
}, []string{"status"})
|
||||
mapResponseEndpointUpdates = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Namespace: prometheusNamespace,
|
||||
Name: "mapresponse_endpoint_updates_total",
|
||||
Help: "total count of endpoint updates received",
|
||||
}, []string{"status"})
|
||||
mapResponseReadOnly = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Namespace: prometheusNamespace,
|
||||
Name: "mapresponse_readonly_requests_total",
|
||||
Help: "total count of readonly requests received",
|
||||
}, []string{"status"})
|
||||
mapResponseEnded = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Namespace: prometheusNamespace,
|
||||
Name: "mapresponse_ended_total",
|
||||
Help: "total count of new mapsessions ended",
|
||||
}, []string{"reason"})
|
||||
mapResponseClosed = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Namespace: prometheusNamespace,
|
||||
Name: "mapresponse_closed_total",
|
||||
Help: "total count of calls to mapresponse close",
|
||||
}, []string{"return"})
|
||||
httpDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{
|
||||
Namespace: prometheusNamespace,
|
||||
Name: "http_duration_seconds",
|
||||
Help: "Duration of HTTP requests.",
|
||||
}, []string{"path"})
|
||||
httpCounter = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Namespace: prometheusNamespace,
|
||||
Name: "http_requests_total",
|
||||
Help: "Total number of http requests processed",
|
||||
}, []string{"code", "method", "path"},
|
||||
)
|
||||
)
|
||||
|
||||
// prometheusMiddleware implements mux.MiddlewareFunc.
func prometheusMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
route := mux.CurrentRoute(r)
path, _ := route.GetPathTemplate()

// Ignore streaming and noise sessions
// it has its own router further down.
if path == "/ts2021" || path == "/machine/map" || path == "/derp" || path == "/derp/probe" || path == "/bootstrap-dns" {
next.ServeHTTP(w, r)
return
}

rw := &respWriterProm{ResponseWriter: w}

timer := prometheus.NewTimer(httpDuration.WithLabelValues(path))
next.ServeHTTP(rw, r)
timer.ObserveDuration()
httpCounter.WithLabelValues(strconv.Itoa(rw.status), r.Method, path).Inc()
})
}

type respWriterProm struct {
http.ResponseWriter
status int
written int64
wroteHeader bool
}

func (r *respWriterProm) WriteHeader(code int) {
r.status = code
r.wroteHeader = true
r.ResponseWriter.WriteHeader(code)
}

func (r *respWriterProm) Write(b []byte) (int, error) {
if !r.wroteHeader {
r.WriteHeader(http.StatusOK)
}
n, err := r.ResponseWriter.Write(b)
r.written += int64(n)
return n, err
}
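// Editor's note: a minimal sketch (not part of this change) of wiring the
// middleware above into a router and exposing the collected metrics. The
// promhttp import ("github.com/prometheus/client_golang/prometheus/promhttp")
// and the /metrics path are assumptions; headscale's real wiring lives elsewhere.
func exampleMetricsRouter() *mux.Router {
	router := mux.NewRouter()
	router.Use(prometheusMiddleware)

	// Everything registered through promauto ends up on the default
	// registry, which promhttp.Handler() serves.
	router.Handle("/metrics", promhttp.Handler())

	return router
}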
@@ -95,18 +95,19 @@ func (h *Headscale) NoiseUpgradeHandler(
// The HTTP2 server that exposes this router is created for
// a single hijacked connection from /ts2021, using netutil.NewOneConnListener
router := mux.NewRouter()
router.Use(prometheusMiddleware)

router.HandleFunc("/machine/register", noiseServer.NoiseRegistrationHandler).
Methods(http.MethodPost)
router.HandleFunc("/machine/map", noiseServer.NoisePollNetMapHandler)

server := http.Server{
ReadTimeout: types.HTTPReadTimeout,
ReadTimeout: types.HTTPTimeout,
}

noiseServer.httpBaseConfig = &http.Server{
Handler: router,
ReadHeaderTimeout: types.HTTPReadTimeout,
ReadHeaderTimeout: types.HTTPTimeout,
}
noiseServer.http2Server = &http2.Server{}

@@ -163,3 +164,79 @@ func (ns *noiseServer) earlyNoise(protocolVersion int, writer io.Writer) error {

return nil
}

const (
MinimumCapVersion tailcfg.CapabilityVersion = 58
)

// NoisePollNetMapHandler takes care of /machine/:id/map using the Noise protocol
//
// This is the busiest endpoint, as it keeps the HTTP long poll that updates
// the clients when something in the network changes.
//
// The clients POST stuff like HostInfo and their Endpoints here, but
// only after their first request (marked with the ReadOnly field).
//
// At this moment the updates are sent in a quite horrendous way, but they kinda work.
func (ns *noiseServer) NoisePollNetMapHandler(
writer http.ResponseWriter,
req *http.Request,
) {
log.Trace().
Str("handler", "NoisePollNetMap").
Msg("PollNetMapHandler called")

log.Trace().
Any("headers", req.Header).
Caller().
Msg("Headers")

body, _ := io.ReadAll(req.Body)

mapRequest := tailcfg.MapRequest{}
if err := json.Unmarshal(body, &mapRequest); err != nil {
log.Error().
Caller().
Err(err).
Msg("Cannot parse MapRequest")
http.Error(writer, "Internal error", http.StatusInternalServerError)

return
}

// Reject unsupported versions
if mapRequest.Version < MinimumCapVersion {
log.Info().
Caller().
Int("min_version", int(MinimumCapVersion)).
Int("client_version", int(mapRequest.Version)).
Msg("unsupported client connected")
http.Error(writer, "Internal error", http.StatusBadRequest)

return
}

ns.nodeKey = mapRequest.NodeKey

node, err := ns.headscale.db.GetNodeByAnyKey(
ns.conn.Peer(),
mapRequest.NodeKey,
key.NodePublic{},
)
if err != nil {
log.Error().
Str("handler", "NoisePollNetMap").
Msgf("Failed to fetch node from the database with node key: %s", mapRequest.NodeKey.String())
http.Error(writer, "Internal error", http.StatusInternalServerError)

return
}

sess := ns.headscale.newMapSession(req.Context(), mapRequest, writer, node)
sess.tracef("a node sending a MapRequest with Noise protocol")
if !sess.isStreaming() {
sess.serve()
} else {
sess.serveLongPoll()
}
}
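// Editor's note: a short illustration (not part of this change) of the version
// gate above: a client advertising a CapabilityVersion below MinimumCapVersion
// (58 in this change) is rejected with HTTP 400 before any node lookup happens.
// The function name is hypothetical.
func exampleVersionGate() int {
	mapRequest := tailcfg.MapRequest{Version: 57}

	if mapRequest.Version < MinimumCapVersion {
		return http.StatusBadRequest // what NoisePollNetMapHandler writes for old clients
	}

	return http.StatusOK
}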
hscontrol/notifier/metrics.go (new file, 68 lines)
@@ -0,0 +1,68 @@
package notifier

import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"tailscale.com/envknob"
)

const prometheusNamespace = "headscale"

var debugHighCardinalityMetrics = envknob.Bool("HEADSCALE_DEBUG_HIGH_CARDINALITY_METRICS")

var notifierUpdateSent *prometheus.CounterVec

func init() {
if debugHighCardinalityMetrics {
notifierUpdateSent = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: prometheusNamespace,
Name: "notifier_update_sent_total",
Help: "total count of update sent on nodes channel",
}, []string{"status", "type", "trigger", "id"})
} else {
notifierUpdateSent = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: prometheusNamespace,
Name: "notifier_update_sent_total",
Help: "total count of update sent on nodes channel",
}, []string{"status", "type", "trigger"})
}
}

var (
notifierWaitersForLock = promauto.NewGaugeVec(prometheus.GaugeOpts{
Namespace: prometheusNamespace,
Name: "notifier_waiters_for_lock",
Help: "gauge of waiters for the notifier lock",
}, []string{"type", "action"})
notifierWaitForLock = promauto.NewHistogramVec(prometheus.HistogramOpts{
Namespace: prometheusNamespace,
Name: "notifier_wait_for_lock_seconds",
Help: "histogram of time spent waiting for the notifier lock",
Buckets: []float64{0.001, 0.01, 0.1, 0.3, 0.5, 1, 3, 5, 10},
}, []string{"action"})
notifierUpdateReceived = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: prometheusNamespace,
Name: "notifier_update_received_total",
Help: "total count of updates received by notifier",
}, []string{"type", "trigger"})
notifierNodeUpdateChans = promauto.NewGauge(prometheus.GaugeOpts{
Namespace: prometheusNamespace,
Name: "notifier_open_channels_total",
Help: "total count open channels in notifier",
})
notifierBatcherWaitersForLock = promauto.NewGaugeVec(prometheus.GaugeOpts{
Namespace: prometheusNamespace,
Name: "notifier_batcher_waiters_for_lock",
Help: "gauge of waiters for the notifier batcher lock",
}, []string{"type", "action"})
notifierBatcherChanges = promauto.NewGaugeVec(prometheus.GaugeOpts{
Namespace: prometheusNamespace,
Name: "notifier_batcher_changes_pending",
Help: "gauge of full changes pending in the notifier batcher",
}, []string{})
notifierBatcherPatches = promauto.NewGaugeVec(prometheus.GaugeOpts{
Namespace: prometheusNamespace,
Name: "notifier_batcher_patches_pending",
Help: "gauge of patches pending in the notifier batcher",
}, []string{})
)
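// Editor's note: a sketch (not part of this change) of how notifierUpdateSent is
// meant to be incremented, mirroring the call sites added to notifier.go below:
// the per-node "id" label only exists when HEADSCALE_DEBUG_HIGH_CARDINALITY_METRICS
// is set, so callers must pick the matching label set. The helper is hypothetical.
func exampleRecordUpdateSent(status, updateType, trigger, nodeID string) {
	if debugHighCardinalityMetrics {
		notifierUpdateSent.WithLabelValues(status, updateType, trigger, nodeID).Inc()
	} else {
		notifierUpdateSent.WithLabelValues(status, updateType, trigger).Inc()
	}
}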
@ -3,81 +3,143 @@ package notifier
|
|||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/juanfont/headscale/hscontrol/util"
|
||||
"github.com/puzpuzpuz/xsync/v3"
|
||||
"github.com/rs/zerolog/log"
|
||||
"tailscale.com/types/key"
|
||||
"github.com/sasha-s/go-deadlock"
|
||||
"tailscale.com/envknob"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/util/set"
|
||||
)
|
||||
|
||||
type Notifier struct {
|
||||
l sync.RWMutex
|
||||
nodes map[string]chan<- types.StateUpdate
|
||||
connected map[key.MachinePublic]bool
|
||||
}
|
||||
var debugDeadlock = envknob.Bool("HEADSCALE_DEBUG_DEADLOCK")
|
||||
var debugDeadlockTimeout = envknob.RegisterDuration("HEADSCALE_DEBUG_DEADLOCK_TIMEOUT")
|
||||
|
||||
func NewNotifier() *Notifier {
|
||||
return &Notifier{
|
||||
nodes: make(map[string]chan<- types.StateUpdate),
|
||||
connected: make(map[key.MachinePublic]bool),
|
||||
func init() {
|
||||
deadlock.Opts.Disable = !debugDeadlock
|
||||
if debugDeadlock {
|
||||
deadlock.Opts.DeadlockTimeout = debugDeadlockTimeout()
|
||||
deadlock.Opts.PrintAllCurrentGoroutines = true
|
||||
}
|
||||
}
|
||||
|
||||
func (n *Notifier) AddNode(machineKey key.MachinePublic, c chan<- types.StateUpdate) {
|
||||
log.Trace().Caller().Str("key", machineKey.ShortString()).Msg("acquiring lock to add node")
|
||||
defer log.Trace().
|
||||
Caller().
|
||||
Str("key", machineKey.ShortString()).
|
||||
Msg("releasing lock to add node")
|
||||
|
||||
n.l.Lock()
|
||||
defer n.l.Unlock()
|
||||
|
||||
n.nodes[machineKey.String()] = c
|
||||
n.connected[machineKey] = true
|
||||
|
||||
log.Trace().
|
||||
Str("machine_key", machineKey.ShortString()).
|
||||
Int("open_chans", len(n.nodes)).
|
||||
Msg("Added new channel")
|
||||
type Notifier struct {
|
||||
l deadlock.Mutex
|
||||
nodes map[types.NodeID]chan<- types.StateUpdate
|
||||
connected *xsync.MapOf[types.NodeID, bool]
|
||||
b *batcher
|
||||
cfg *types.Config
|
||||
}
|
||||
|
||||
func (n *Notifier) RemoveNode(machineKey key.MachinePublic) {
|
||||
log.Trace().Caller().Str("key", machineKey.ShortString()).Msg("acquiring lock to remove node")
|
||||
defer log.Trace().
|
||||
Caller().
|
||||
Str("key", machineKey.ShortString()).
|
||||
Msg("releasing lock to remove node")
|
||||
func NewNotifier(cfg *types.Config) *Notifier {
|
||||
n := &Notifier{
|
||||
nodes: make(map[types.NodeID]chan<- types.StateUpdate),
|
||||
connected: xsync.NewMapOf[types.NodeID, bool](),
|
||||
cfg: cfg,
|
||||
}
|
||||
b := newBatcher(cfg.Tuning.BatchChangeDelay, n)
|
||||
n.b = b
|
||||
|
||||
go b.doWork()
|
||||
return n
|
||||
}
|
||||
|
||||
// Close stops the batcher inside the notifier.
|
||||
func (n *Notifier) Close() {
|
||||
n.b.close()
|
||||
}
|
||||
|
||||
func (n *Notifier) tracef(nID types.NodeID, msg string, args ...any) {
|
||||
log.Trace().
|
||||
Uint64("node.id", nID.Uint64()).
|
||||
Int("open_chans", len(n.nodes)).Msgf(msg, args...)
|
||||
}
|
||||
|
||||
func (n *Notifier) AddNode(nodeID types.NodeID, c chan<- types.StateUpdate) {
|
||||
start := time.Now()
|
||||
notifierWaitersForLock.WithLabelValues("lock", "add").Inc()
|
||||
n.l.Lock()
|
||||
defer n.l.Unlock()
|
||||
notifierWaitersForLock.WithLabelValues("lock", "add").Dec()
|
||||
notifierWaitForLock.WithLabelValues("add").Observe(time.Since(start).Seconds())
|
||||
|
||||
// If a channel exists, it means the node has opened a new
|
||||
// connection. Close the old channel and replace it.
|
||||
if curr, ok := n.nodes[nodeID]; ok {
|
||||
n.tracef(nodeID, "channel present, closing and replacing")
|
||||
close(curr)
|
||||
}
|
||||
|
||||
n.nodes[nodeID] = c
|
||||
n.connected.Store(nodeID, true)
|
||||
|
||||
n.tracef(nodeID, "added new channel")
|
||||
notifierNodeUpdateChans.Inc()
|
||||
}
|
||||
|
||||
// RemoveNode removes a node and a given channel from the notifier.
|
||||
// It checks that the channel is the same as currently being updated
|
||||
// and ignores the removal if it is not.
|
||||
// RemoveNode reports if the node/chan was removed.
|
||||
func (n *Notifier) RemoveNode(nodeID types.NodeID, c chan<- types.StateUpdate) bool {
|
||||
start := time.Now()
|
||||
notifierWaitersForLock.WithLabelValues("lock", "remove").Inc()
|
||||
n.l.Lock()
|
||||
defer n.l.Unlock()
|
||||
notifierWaitersForLock.WithLabelValues("lock", "remove").Dec()
|
||||
notifierWaitForLock.WithLabelValues("remove").Observe(time.Since(start).Seconds())
|
||||
|
||||
if len(n.nodes) == 0 {
|
||||
return
|
||||
return true
|
||||
}
|
||||
|
||||
delete(n.nodes, machineKey.String())
|
||||
n.connected[machineKey] = false
|
||||
// If the channel exist, but it does not belong
|
||||
// to the caller, ignore.
|
||||
if curr, ok := n.nodes[nodeID]; ok {
|
||||
if curr != c {
|
||||
n.tracef(nodeID, "channel has been replaced, not removing")
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
log.Trace().
|
||||
Str("machine_key", machineKey.ShortString()).
|
||||
Int("open_chans", len(n.nodes)).
|
||||
Msg("Removed channel")
|
||||
delete(n.nodes, nodeID)
|
||||
n.connected.Store(nodeID, false)
|
||||
|
||||
n.tracef(nodeID, "removed channel")
|
||||
notifierNodeUpdateChans.Dec()
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// IsConnected reports if a node is connected to headscale and has a
|
||||
// poll session open.
|
||||
func (n *Notifier) IsConnected(machineKey key.MachinePublic) bool {
|
||||
n.l.RLock()
|
||||
defer n.l.RUnlock()
|
||||
func (n *Notifier) IsConnected(nodeID types.NodeID) bool {
|
||||
notifierWaitersForLock.WithLabelValues("lock", "conncheck").Inc()
|
||||
n.l.Lock()
|
||||
defer n.l.Unlock()
|
||||
notifierWaitersForLock.WithLabelValues("lock", "conncheck").Dec()
|
||||
|
||||
return n.connected[machineKey]
|
||||
if val, ok := n.connected.Load(nodeID); ok {
|
||||
return val
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// TODO(kradalby): This returns a pointer and can be dangerous.
|
||||
func (n *Notifier) ConnectedMap() map[key.MachinePublic]bool {
|
||||
// IsLikelyConnected reports if a node is connected to headscale and has a
|
||||
// poll session open, but doesn't lock, so it might be wrong.
|
||||
func (n *Notifier) IsLikelyConnected(nodeID types.NodeID) bool {
|
||||
if val, ok := n.connected.Load(nodeID); ok {
|
||||
return val
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (n *Notifier) LikelyConnectedMap() *xsync.MapOf[types.NodeID, bool] {
|
||||
return n.connected
|
||||
}
|
||||
|
||||
|
@ -88,86 +150,288 @@ func (n *Notifier) NotifyAll(ctx context.Context, update types.StateUpdate) {
|
|||
func (n *Notifier) NotifyWithIgnore(
|
||||
ctx context.Context,
|
||||
update types.StateUpdate,
|
||||
ignore ...string,
|
||||
ignoreNodeIDs ...types.NodeID,
|
||||
) {
|
||||
log.Trace().Caller().Interface("type", update.Type).Msg("acquiring lock to notify")
|
||||
defer log.Trace().
|
||||
Caller().
|
||||
Interface("type", update.Type).
|
||||
Msg("releasing lock, finished notifying")
|
||||
notifierUpdateReceived.WithLabelValues(update.Type.String(), types.NotifyOriginKey.Value(ctx)).Inc()
|
||||
n.b.addOrPassthrough(update)
|
||||
}
|
||||
|
||||
n.l.RLock()
|
||||
defer n.l.RUnlock()
|
||||
|
||||
for key, c := range n.nodes {
|
||||
if util.IsStringInSlice(ignore, key) {
|
||||
continue
|
||||
}
|
||||
func (n *Notifier) NotifyByNodeID(
|
||||
ctx context.Context,
|
||||
update types.StateUpdate,
|
||||
nodeID types.NodeID,
|
||||
) {
|
||||
start := time.Now()
|
||||
notifierWaitersForLock.WithLabelValues("lock", "notify").Inc()
|
||||
n.l.Lock()
|
||||
defer n.l.Unlock()
|
||||
notifierWaitersForLock.WithLabelValues("lock", "notify").Dec()
|
||||
notifierWaitForLock.WithLabelValues("notify").Observe(time.Since(start).Seconds())
|
||||
|
||||
if c, ok := n.nodes[nodeID]; ok {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
log.Error().
|
||||
Err(ctx.Err()).
|
||||
Str("mkey", key).
|
||||
Any("origin", ctx.Value("origin")).
|
||||
Any("hostname", ctx.Value("hostname")).
|
||||
Uint64("node.id", nodeID.Uint64()).
|
||||
Any("origin", types.NotifyOriginKey.Value(ctx)).
|
||||
Any("origin-hostname", types.NotifyHostnameKey.Value(ctx)).
|
||||
Msgf("update not sent, context cancelled")
|
||||
if debugHighCardinalityMetrics {
|
||||
notifierUpdateSent.WithLabelValues("cancelled", update.Type.String(), types.NotifyOriginKey.Value(ctx), nodeID.String()).Inc()
|
||||
} else {
|
||||
notifierUpdateSent.WithLabelValues("cancelled", update.Type.String(), types.NotifyOriginKey.Value(ctx)).Inc()
|
||||
}
|
||||
|
||||
return
|
||||
case c <- update:
|
||||
log.Trace().
|
||||
Str("mkey", key).
|
||||
Any("origin", ctx.Value("origin")).
|
||||
Any("hostname", ctx.Value("hostname")).
|
||||
Msgf("update successfully sent on chan")
|
||||
n.tracef(nodeID, "update successfully sent on chan, origin: %s, origin-hostname: %s", ctx.Value("origin"), ctx.Value("hostname"))
|
||||
if debugHighCardinalityMetrics {
|
||||
notifierUpdateSent.WithLabelValues("ok", update.Type.String(), types.NotifyOriginKey.Value(ctx), nodeID.String()).Inc()
|
||||
} else {
|
||||
notifierUpdateSent.WithLabelValues("ok", update.Type.String(), types.NotifyOriginKey.Value(ctx)).Inc()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (n *Notifier) NotifyByMachineKey(
|
||||
ctx context.Context,
|
||||
update types.StateUpdate,
|
||||
mKey key.MachinePublic,
|
||||
) {
|
||||
log.Trace().Caller().Interface("type", update.Type).Msg("acquiring lock to notify")
|
||||
defer log.Trace().
|
||||
Caller().
|
||||
Interface("type", update.Type).
|
||||
Msg("releasing lock, finished notifying")
|
||||
func (n *Notifier) sendAll(update types.StateUpdate) {
|
||||
start := time.Now()
|
||||
notifierWaitersForLock.WithLabelValues("lock", "send-all").Inc()
|
||||
n.l.Lock()
|
||||
defer n.l.Unlock()
|
||||
notifierWaitersForLock.WithLabelValues("lock", "send-all").Dec()
|
||||
notifierWaitForLock.WithLabelValues("send-all").Observe(time.Since(start).Seconds())
|
||||
|
||||
n.l.RLock()
|
||||
defer n.l.RUnlock()
|
||||
|
||||
if c, ok := n.nodes[mKey.String()]; ok {
|
||||
for id, c := range n.nodes {
|
||||
// Whenever an update is sent to all nodes, there is a chance that the node
|
||||
// has disconnected and the goroutine that was supposed to consume the update
|
||||
// has shut down the channel and is waiting for the lock held here in RemoveNode.
|
||||
// This means that there is potential for a deadlock which would stop all updates
|
||||
// going out to clients. This timeout prevents that from happening by moving on to the
|
||||
// next node if the context is cancelled. After sendAll releases the lock, the add/remove
|
||||
// call will succeed and the update will go to the correct nodes on the next call.
|
||||
ctx, cancel := context.WithTimeout(context.Background(), n.cfg.Tuning.NotifierSendTimeout)
|
||||
defer cancel()
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
log.Error().
|
||||
Err(ctx.Err()).
|
||||
Str("mkey", mKey.String()).
|
||||
Any("origin", ctx.Value("origin")).
|
||||
Any("hostname", ctx.Value("hostname")).
|
||||
Uint64("node.id", id.Uint64()).
|
||||
Msgf("update not sent, context cancelled")
|
||||
if debugHighCardinalityMetrics {
|
||||
notifierUpdateSent.WithLabelValues("cancelled", update.Type.String(), "send-all", id.String()).Inc()
|
||||
} else {
|
||||
notifierUpdateSent.WithLabelValues("cancelled", update.Type.String(), "send-all").Inc()
|
||||
}
|
||||
|
||||
return
|
||||
case c <- update:
|
||||
log.Trace().
|
||||
Str("mkey", mKey.String()).
|
||||
Any("origin", ctx.Value("origin")).
|
||||
Any("hostname", ctx.Value("hostname")).
|
||||
Msgf("update successfully sent on chan")
|
||||
if debugHighCardinalityMetrics {
|
||||
notifierUpdateSent.WithLabelValues("ok", update.Type.String(), "send-all", id.String()).Inc()
|
||||
} else {
|
||||
notifierUpdateSent.WithLabelValues("ok", update.Type.String(), "send-all").Inc()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (n *Notifier) String() string {
|
||||
n.l.RLock()
|
||||
defer n.l.RUnlock()
|
||||
notifierWaitersForLock.WithLabelValues("lock", "string").Inc()
|
||||
n.l.Lock()
|
||||
defer n.l.Unlock()
|
||||
notifierWaitersForLock.WithLabelValues("lock", "string").Dec()
|
||||
|
||||
str := []string{"Notifier, in map:\n"}
|
||||
var b strings.Builder
|
||||
fmt.Fprintf(&b, "chans (%d):\n", len(n.nodes))
|
||||
|
||||
for k, v := range n.nodes {
|
||||
str = append(str, fmt.Sprintf("\t%s: %v\n", k, v))
|
||||
var keys []types.NodeID
|
||||
n.connected.Range(func(key types.NodeID, value bool) bool {
|
||||
keys = append(keys, key)
|
||||
return true
|
||||
})
|
||||
sort.Slice(keys, func(i, j int) bool {
|
||||
return keys[i] < keys[j]
|
||||
})
|
||||
|
||||
for _, key := range keys {
|
||||
fmt.Fprintf(&b, "\t%d: %p\n", key, n.nodes[key])
|
||||
}
|
||||
|
||||
return strings.Join(str, "")
|
||||
b.WriteString("\n")
|
||||
fmt.Fprintf(&b, "connected (%d):\n", len(n.nodes))
|
||||
|
||||
for _, key := range keys {
|
||||
val, _ := n.connected.Load(key)
|
||||
fmt.Fprintf(&b, "\t%d: %t\n", key, val)
|
||||
}
|
||||
|
||||
return b.String()
|
||||
}
|
||||
type batcher struct {
tick *time.Ticker

mu sync.Mutex

cancelCh chan struct{}

changedNodeIDs set.Slice[types.NodeID]
nodesChanged bool
patches map[types.NodeID]tailcfg.PeerChange
patchesChanged bool

n *Notifier
}

func newBatcher(batchTime time.Duration, n *Notifier) *batcher {
return &batcher{
tick: time.NewTicker(batchTime),
cancelCh: make(chan struct{}),
patches: make(map[types.NodeID]tailcfg.PeerChange),
n: n,
}
}

func (b *batcher) close() {
b.cancelCh <- struct{}{}
}

// addOrPassthrough adds the update to the batcher, if it is not a
// type that is currently batched, it will be sent immediately.
func (b *batcher) addOrPassthrough(update types.StateUpdate) {
notifierBatcherWaitersForLock.WithLabelValues("lock", "add").Inc()
b.mu.Lock()
defer b.mu.Unlock()
notifierBatcherWaitersForLock.WithLabelValues("lock", "add").Dec()

switch update.Type {
case types.StatePeerChanged:
b.changedNodeIDs.Add(update.ChangeNodes...)
b.nodesChanged = true
notifierBatcherChanges.WithLabelValues().Set(float64(b.changedNodeIDs.Len()))

case types.StatePeerChangedPatch:
for _, newPatch := range update.ChangePatches {
if curr, ok := b.patches[types.NodeID(newPatch.NodeID)]; ok {
overwritePatch(&curr, newPatch)
b.patches[types.NodeID(newPatch.NodeID)] = curr
} else {
b.patches[types.NodeID(newPatch.NodeID)] = *newPatch
}
}
b.patchesChanged = true
notifierBatcherPatches.WithLabelValues().Set(float64(len(b.patches)))

default:
b.n.sendAll(update)
}
}

// flush sends all the accumulated patches to all
// nodes in the notifier.
func (b *batcher) flush() {
notifierBatcherWaitersForLock.WithLabelValues("lock", "flush").Inc()
b.mu.Lock()
defer b.mu.Unlock()
notifierBatcherWaitersForLock.WithLabelValues("lock", "flush").Dec()

if b.nodesChanged || b.patchesChanged {
var patches []*tailcfg.PeerChange
// If a node is getting a full update from a change
// node update, then the patch can be dropped.
for nodeID, patch := range b.patches {
if b.changedNodeIDs.Contains(nodeID) {
delete(b.patches, nodeID)
} else {
patches = append(patches, &patch)
}
}

changedNodes := b.changedNodeIDs.Slice().AsSlice()
sort.Slice(changedNodes, func(i, j int) bool {
return changedNodes[i] < changedNodes[j]
})

if b.changedNodeIDs.Slice().Len() > 0 {
update := types.StateUpdate{
Type: types.StatePeerChanged,
ChangeNodes: changedNodes,
}

b.n.sendAll(update)
}

if len(patches) > 0 {
patchUpdate := types.StateUpdate{
Type: types.StatePeerChangedPatch,
ChangePatches: patches,
}

b.n.sendAll(patchUpdate)
}

b.changedNodeIDs = set.Slice[types.NodeID]{}
notifierBatcherChanges.WithLabelValues().Set(0)
b.nodesChanged = false
b.patches = make(map[types.NodeID]tailcfg.PeerChange, len(b.patches))
notifierBatcherPatches.WithLabelValues().Set(0)
b.patchesChanged = false
}
}

func (b *batcher) doWork() {
for {
select {
case <-b.cancelCh:
return
case <-b.tick.C:
b.flush()
}
}
}

// overwritePatch takes the current patch and a newer patch
// and overrides any field that has changed.
func overwritePatch(currPatch, newPatch *tailcfg.PeerChange) {
if newPatch.DERPRegion != 0 {
currPatch.DERPRegion = newPatch.DERPRegion
}

if newPatch.Cap != 0 {
currPatch.Cap = newPatch.Cap
}

if newPatch.CapMap != nil {
currPatch.CapMap = newPatch.CapMap
}

if newPatch.Endpoints != nil {
currPatch.Endpoints = newPatch.Endpoints
}

if newPatch.Key != nil {
currPatch.Key = newPatch.Key
}

if newPatch.KeySignature != nil {
currPatch.KeySignature = newPatch.KeySignature
}

if newPatch.DiscoKey != nil {
currPatch.DiscoKey = newPatch.DiscoKey
}

if newPatch.Online != nil {
currPatch.Online = newPatch.Online
}

if newPatch.LastSeen != nil {
currPatch.LastSeen = newPatch.LastSeen
}

if newPatch.KeyExpiry != nil {
currPatch.KeyExpiry = newPatch.KeyExpiry
}
}
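// Editor's note: a worked example (not part of this change) of the merge
// semantics of overwritePatch above, matching the "merge-patch-to-same-node-update"
// case in the test file that follows: a later non-zero field wins, untouched
// fields keep their previous value. The function name is hypothetical.
func exampleOverwritePatch() tailcfg.PeerChange {
	curr := tailcfg.PeerChange{NodeID: 2, DERPRegion: 5}
	next := tailcfg.PeerChange{NodeID: 2, DERPRegion: 6}

	overwritePatch(&curr, &next)

	// curr.DERPRegion is now 6; other fields are untouched because
	// overwritePatch only copies fields that are set in the newer patch.
	return curr
}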
hscontrol/notifier/notifier_test.go (new file, 249 lines)
@@ -0,0 +1,249 @@
|||
package notifier
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/netip"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/juanfont/headscale/hscontrol/util"
|
||||
"tailscale.com/tailcfg"
|
||||
)
|
||||
|
||||
func TestBatcher(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
updates []types.StateUpdate
|
||||
want []types.StateUpdate
|
||||
}{
|
||||
{
|
||||
name: "full-passthrough",
|
||||
updates: []types.StateUpdate{
|
||||
{
|
||||
Type: types.StateFullUpdate,
|
||||
},
|
||||
},
|
||||
want: []types.StateUpdate{
|
||||
{
|
||||
Type: types.StateFullUpdate,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "derp-passthrough",
|
||||
updates: []types.StateUpdate{
|
||||
{
|
||||
Type: types.StateDERPUpdated,
|
||||
},
|
||||
},
|
||||
want: []types.StateUpdate{
|
||||
{
|
||||
Type: types.StateDERPUpdated,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "single-node-update",
|
||||
updates: []types.StateUpdate{
|
||||
{
|
||||
Type: types.StatePeerChanged,
|
||||
ChangeNodes: []types.NodeID{
|
||||
2,
|
||||
},
|
||||
},
|
||||
},
|
||||
want: []types.StateUpdate{
|
||||
{
|
||||
Type: types.StatePeerChanged,
|
||||
ChangeNodes: []types.NodeID{
|
||||
2,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "merge-node-update",
|
||||
updates: []types.StateUpdate{
|
||||
{
|
||||
Type: types.StatePeerChanged,
|
||||
ChangeNodes: []types.NodeID{
|
||||
2, 4,
|
||||
},
|
||||
},
|
||||
{
|
||||
Type: types.StatePeerChanged,
|
||||
ChangeNodes: []types.NodeID{
|
||||
2, 3,
|
||||
},
|
||||
},
|
||||
},
|
||||
want: []types.StateUpdate{
|
||||
{
|
||||
Type: types.StatePeerChanged,
|
||||
ChangeNodes: []types.NodeID{
|
||||
2, 3, 4,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "single-patch-update",
|
||||
updates: []types.StateUpdate{
|
||||
{
|
||||
Type: types.StatePeerChangedPatch,
|
||||
ChangePatches: []*tailcfg.PeerChange{
|
||||
{
|
||||
NodeID: 2,
|
||||
DERPRegion: 5,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
want: []types.StateUpdate{
|
||||
{
|
||||
Type: types.StatePeerChangedPatch,
|
||||
ChangePatches: []*tailcfg.PeerChange{
|
||||
{
|
||||
NodeID: 2,
|
||||
DERPRegion: 5,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "merge-patch-to-same-node-update",
|
||||
updates: []types.StateUpdate{
|
||||
{
|
||||
Type: types.StatePeerChangedPatch,
|
||||
ChangePatches: []*tailcfg.PeerChange{
|
||||
{
|
||||
NodeID: 2,
|
||||
DERPRegion: 5,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Type: types.StatePeerChangedPatch,
|
||||
ChangePatches: []*tailcfg.PeerChange{
|
||||
{
|
||||
NodeID: 2,
|
||||
DERPRegion: 6,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
want: []types.StateUpdate{
|
||||
{
|
||||
Type: types.StatePeerChangedPatch,
|
||||
ChangePatches: []*tailcfg.PeerChange{
|
||||
{
|
||||
NodeID: 2,
|
||||
DERPRegion: 6,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "merge-patch-to-multiple-node-update",
|
||||
updates: []types.StateUpdate{
|
||||
{
|
||||
Type: types.StatePeerChangedPatch,
|
||||
ChangePatches: []*tailcfg.PeerChange{
|
||||
{
|
||||
NodeID: 3,
|
||||
Endpoints: []netip.AddrPort{
|
||||
netip.MustParseAddrPort("1.1.1.1:9090"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Type: types.StatePeerChangedPatch,
|
||||
ChangePatches: []*tailcfg.PeerChange{
|
||||
{
|
||||
NodeID: 3,
|
||||
Endpoints: []netip.AddrPort{
|
||||
netip.MustParseAddrPort("1.1.1.1:9090"),
|
||||
netip.MustParseAddrPort("2.2.2.2:8080"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Type: types.StatePeerChangedPatch,
|
||||
ChangePatches: []*tailcfg.PeerChange{
|
||||
{
|
||||
NodeID: 4,
|
||||
DERPRegion: 6,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Type: types.StatePeerChangedPatch,
|
||||
ChangePatches: []*tailcfg.PeerChange{
|
||||
{
|
||||
NodeID: 4,
|
||||
Cap: tailcfg.CapabilityVersion(54),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
want: []types.StateUpdate{
|
||||
{
|
||||
Type: types.StatePeerChangedPatch,
|
||||
ChangePatches: []*tailcfg.PeerChange{
|
||||
{
|
||||
NodeID: 3,
|
||||
Endpoints: []netip.AddrPort{
|
||||
netip.MustParseAddrPort("1.1.1.1:9090"),
|
||||
netip.MustParseAddrPort("2.2.2.2:8080"),
|
||||
},
|
||||
},
|
||||
{
|
||||
NodeID: 4,
|
||||
DERPRegion: 6,
|
||||
Cap: tailcfg.CapabilityVersion(54),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
n := NewNotifier(&types.Config{
|
||||
Tuning: types.Tuning{
|
||||
// We will call flush manually for the tests,
|
||||
// so do not run the worker.
|
||||
BatchChangeDelay: time.Hour,
|
||||
},
|
||||
})
|
||||
|
||||
ch := make(chan types.StateUpdate, 30)
|
||||
defer close(ch)
|
||||
n.AddNode(1, ch)
|
||||
defer n.RemoveNode(1, ch)
|
||||
|
||||
for _, u := range tt.updates {
|
||||
n.NotifyAll(context.Background(), u)
|
||||
}
|
||||
|
||||
n.b.flush()
|
||||
|
||||
var got []types.StateUpdate
|
||||
for len(ch) > 0 {
|
||||
out := <-ch
|
||||
got = append(got, out)
|
||||
}
|
||||
|
||||
if diff := cmp.Diff(tt.want, got, util.Comparers...); diff != "" {
|
||||
t.Errorf("batcher() unexpected result (-want +got):\n%s", diff)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
|
@ -58,12 +58,7 @@ func (h *Headscale) initOIDC() error {
|
|||
h.oidcProvider, err = oidc.NewProvider(context.Background(), h.cfg.OIDC.Issuer)
|
||||
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Err(err).
|
||||
Caller().
|
||||
Msgf("Could not retrieve OIDC Config: %s", err.Error())
|
||||
|
||||
return err
|
||||
return fmt.Errorf("creating OIDC provider from issuer config: %w", err)
|
||||
}
|
||||
|
||||
h.oauth2Config = &oauth2.Config{
|
||||
|
@ -514,12 +509,6 @@ func (h *Headscale) validateNodeForOIDCCallback(
|
|||
User: claims.Email,
|
||||
Verb: "Reauthenticated",
|
||||
}); err != nil {
|
||||
log.Error().
|
||||
Str("func", "OIDCCallback").
|
||||
Str("type", "reauthenticate").
|
||||
Err(err).
|
||||
Msg("Could not render OIDC callback template")
|
||||
|
||||
writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
|
||||
writer.WriteHeader(http.StatusInternalServerError)
|
||||
_, werr := writer.Write([]byte("Could not render OIDC callback template"))
|
||||
|
@ -527,7 +516,7 @@ func (h *Headscale) validateNodeForOIDCCallback(
|
|||
util.LogErr(err, "Failed to write response")
|
||||
}
|
||||
|
||||
return nil, true, err
|
||||
return nil, true, fmt.Errorf("rendering OIDC callback template: %w", err)
|
||||
}
|
||||
|
||||
writer.Header().Set("Content-Type", "text/html; charset=utf-8")
|
||||
|
@ -537,11 +526,8 @@ func (h *Headscale) validateNodeForOIDCCallback(
|
|||
util.LogErr(err, "Failed to write response")
|
||||
}
|
||||
|
||||
stateUpdate := types.StateUpdateExpire(node.ID, expiry)
|
||||
if stateUpdate.Valid() {
|
||||
ctx := types.NotifyCtx(context.Background(), "oidc-expiry", "na")
|
||||
h.nodeNotifier.NotifyWithIgnore(ctx, stateUpdate, node.MachineKey.String())
|
||||
}
|
||||
ctx := types.NotifyCtx(context.Background(), "oidc-expiry", "na")
|
||||
h.nodeNotifier.NotifyWithIgnore(ctx, types.StateUpdateExpire(node.ID, expiry), node.ID)
|
||||
|
||||
return nil, true, nil
|
||||
}
|
||||
|
@ -582,10 +568,6 @@ func (h *Headscale) findOrCreateNewUserForOIDCCallback(
|
|||
if errors.Is(err, db.ErrUserNotFound) {
|
||||
user, err = h.db.CreateUser(userName)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Err(err).
|
||||
Caller().
|
||||
Msgf("could not create new user '%s'", userName)
|
||||
writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
|
||||
writer.WriteHeader(http.StatusInternalServerError)
|
||||
_, werr := writer.Write([]byte("could not create user"))
|
||||
|
@ -593,14 +575,9 @@ func (h *Headscale) findOrCreateNewUserForOIDCCallback(
|
|||
util.LogErr(err, "Failed to write response")
|
||||
}
|
||||
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("creating new user: %w", err)
|
||||
}
|
||||
} else if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Str("user", userName).
|
||||
Msg("could not find or create user")
|
||||
writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
|
||||
writer.WriteHeader(http.StatusInternalServerError)
|
||||
_, werr := writer.Write([]byte("could not find or create user"))
|
||||
|
@ -608,7 +585,7 @@ func (h *Headscale) findOrCreateNewUserForOIDCCallback(
|
|||
util.LogErr(err, "Failed to write response")
|
||||
}
|
||||
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("find or create user: %w", err)
|
||||
}
|
||||
|
||||
return user, nil
|
||||
|
@ -620,12 +597,12 @@ func (h *Headscale) registerNodeForOIDCCallback(
|
|||
machineKey *key.MachinePublic,
|
||||
expiry time.Time,
|
||||
) error {
|
||||
addrs, err := h.ipAlloc.Next()
|
||||
ipv4, ipv6, err := h.ipAlloc.Next()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := h.db.DB.Transaction(func(tx *gorm.DB) error {
|
||||
if err := h.db.Write(func(tx *gorm.DB) error {
|
||||
if _, err := db.RegisterNodeFromAuthCallback(
|
||||
// TODO(kradalby): find a better way to use the cache across modules
|
||||
tx,
|
||||
|
@ -634,7 +611,7 @@ func (h *Headscale) registerNodeForOIDCCallback(
|
|||
user.Name,
|
||||
&expiry,
|
||||
util.RegisterMethodOIDC,
|
||||
addrs,
|
||||
ipv4, ipv6,
|
||||
); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@@ -664,12 +641,6 @@ func renderOIDCCallbackTemplate(
         User: claims.Email,
         Verb: "Authenticated",
     }); err != nil {
-        log.Error().
-            Str("func", "OIDCCallback").
-            Str("type", "authenticate").
-            Err(err).
-            Msg("Could not render OIDC callback template")
-
         writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
         writer.WriteHeader(http.StatusInternalServerError)
         _, werr := writer.Write([]byte("Could not render OIDC callback template"))
@@ -677,7 +648,7 @@ func renderOIDCCallbackTemplate(
             util.LogErr(err, "Failed to write response")
         }

-        return nil, err
+        return nil, fmt.Errorf("rendering OIDC callback template: %w", err)
     }

     return &content, nil
@@ -36,6 +36,38 @@ const (
     expectedTokenItems = 2
 )

+var theInternetSet *netipx.IPSet
+
+// theInternet returns the IPSet for the Internet.
+// https://www.youtube.com/watch?v=iDbyYGrswtg
+func theInternet() *netipx.IPSet {
+    if theInternetSet != nil {
+        return theInternetSet
+    }
+
+    var internetBuilder netipx.IPSetBuilder
+    internetBuilder.AddPrefix(netip.MustParsePrefix("2000::/3"))
+    internetBuilder.AddPrefix(netip.MustParsePrefix("0.0.0.0/0"))
+
+    // Delete Private network addresses
+    // https://datatracker.ietf.org/doc/html/rfc1918
+    internetBuilder.RemovePrefix(netip.MustParsePrefix("fc00::/7"))
+    internetBuilder.RemovePrefix(netip.MustParsePrefix("10.0.0.0/8"))
+    internetBuilder.RemovePrefix(netip.MustParsePrefix("172.16.0.0/12"))
+    internetBuilder.RemovePrefix(netip.MustParsePrefix("192.168.0.0/16"))
+
+    // Delete Tailscale networks
+    internetBuilder.RemovePrefix(netip.MustParsePrefix("fd7a:115c:a1e0::/48"))
+    internetBuilder.RemovePrefix(netip.MustParsePrefix("100.64.0.0/10"))
+
+    // Delete "cant find DHCP networks"
+    internetBuilder.RemovePrefix(netip.MustParsePrefix("fe80::/10")) // link-local
+    internetBuilder.RemovePrefix(netip.MustParsePrefix("169.254.0.0/16"))
+
+    theInternetSet, _ := internetBuilder.IPSet()
+    return theInternetSet
+}
+
 // For some reason golang.org/x/net/internal/iana is an internal package.
 const (
     protocolICMP = 1 // Internet Control Message
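
A small self-contained sketch of how the set built above behaves, assuming only the go4.org/netipx API used in the hunk: public addresses are contained, while the carved-out private and Tailscale ranges are not.

```go
package main

import (
	"fmt"
	"net/netip"

	"go4.org/netipx"
)

// buildInternet repeats the construction above in abridged form.
func buildInternet() *netipx.IPSet {
	var b netipx.IPSetBuilder
	b.AddPrefix(netip.MustParsePrefix("0.0.0.0/0"))
	b.AddPrefix(netip.MustParsePrefix("2000::/3"))
	b.RemovePrefix(netip.MustParsePrefix("10.0.0.0/8"))    // RFC 1918
	b.RemovePrefix(netip.MustParsePrefix("100.64.0.0/10")) // Tailscale CGNAT range
	set, _ := b.IPSet()

	return set
}

func main() {
	internet := buildInternet()

	fmt.Println(internet.Contains(netip.MustParseAddr("8.8.8.8")))    // true
	fmt.Println(internet.Contains(netip.MustParseAddr("10.1.2.3")))   // false
	fmt.Println(internet.Contains(netip.MustParseAddr("100.64.0.1"))) // false
}
```
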
@@ -114,7 +146,7 @@ func LoadACLPolicyFromBytes(acl []byte, format string) (*ACLPolicy, error) {
     return &policy, nil
 }

-func GenerateFilterAndSSHRules(
+func GenerateFilterAndSSHRulesForTests(
     policy *ACLPolicy,
     node *types.Node,
     peers types.Nodes,
@@ -124,40 +156,31 @@ func GenerateFilterAndSSHRules(
         return tailcfg.FilterAllowAll, &tailcfg.SSHPolicy{}, nil
     }

-    rules, err := policy.generateFilterRules(node, peers)
+    rules, err := policy.CompileFilterRules(append(peers, node))
     if err != nil {
         return []tailcfg.FilterRule{}, &tailcfg.SSHPolicy{}, err
     }

     log.Trace().Interface("ACL", rules).Str("node", node.GivenName).Msg("ACL rules")

-    var sshPolicy *tailcfg.SSHPolicy
-    sshRules, err := policy.generateSSHRules(node, peers)
+    sshPolicy, err := policy.CompileSSHPolicy(node, peers)
     if err != nil {
         return []tailcfg.FilterRule{}, &tailcfg.SSHPolicy{}, err
     }

-    log.Trace().
-        Interface("SSH", sshRules).
-        Str("node", node.GivenName).
-        Msg("SSH rules")
-
     if sshPolicy == nil {
         sshPolicy = &tailcfg.SSHPolicy{}
     }
-    sshPolicy.Rules = sshRules

     return rules, sshPolicy, nil
 }

-// generateFilterRules takes a set of nodes and an ACLPolicy and generates a
+// CompileFilterRules takes a set of nodes and an ACLPolicy and generates a
 // set of Tailscale compatible FilterRules used to allow traffic on clients.
-func (pol *ACLPolicy) generateFilterRules(
-    node *types.Node,
-    peers types.Nodes,
+func (pol *ACLPolicy) CompileFilterRules(
+    nodes types.Nodes,
 ) ([]tailcfg.FilterRule, error) {
     if pol == nil {
         return tailcfg.FilterAllowAll, nil
     }

     rules := []tailcfg.FilterRule{}
-    nodes := append(peers, node)

     for index, acl := range pol.ACLs {
         if acl.Action != "accept" {
@@ -168,23 +191,14 @@ func (pol *ACLPolicy) generateFilterRules(
         for srcIndex, src := range acl.Sources {
             srcs, err := pol.expandSource(src, nodes)
             if err != nil {
-                log.Error().
-                    Interface("src", src).
-                    Int("ACL index", index).
-                    Int("Src index", srcIndex).
-                    Msgf("Error parsing ACL")
-
-                return nil, err
+                return nil, fmt.Errorf("parsing policy, acl index: %d->%d: %w", index, srcIndex, err)
             }
             srcIPs = append(srcIPs, srcs...)
         }

         protocols, isWildcard, err := parseProtocol(acl.Protocol)
         if err != nil {
-            log.Error().
-                Msgf("Error parsing ACL %d. protocol unknown %s", index, acl.Protocol)
-
-            return nil, err
+            return nil, fmt.Errorf("parsing policy, protocol err: %w ", err)
         }

         destPorts := []tailcfg.NetPortRange{}
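
The rename also changes the call shape: CompileFilterRules is compiled once over the combined node list, and ReduceFilterRules then trims the result per node. A sketch of that flow, assuming only the signatures visible in these hunks; the surrounding function is hypothetical.

```go
package main

import (
	"fmt"

	"github.com/juanfont/headscale/hscontrol/policy"
	"github.com/juanfont/headscale/hscontrol/types"
	"tailscale.com/tailcfg"
)

// filterFor sketches the assumed usage: compile the policy against every
// node, then reduce the global rules down to one node's view.
func filterFor(pol *policy.ACLPolicy, node *types.Node, peers types.Nodes) ([]tailcfg.FilterRule, error) {
	rules, err := pol.CompileFilterRules(append(peers, node))
	if err != nil {
		return nil, err
	}

	return policy.ReduceFilterRules(node, rules), nil
}

func main() {
	fmt.Println("see filterFor for the assumed call shape")
}
```
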
@@ -239,28 +253,28 @@ func ReduceFilterRules(node *types.Node, rules []tailcfg.FilterRule) []tailcfg.F
         // record if the rule is actually relevant for the given node.
         dests := []tailcfg.NetPortRange{}

     DEST_LOOP:
         for _, dest := range rule.DstPorts {
             expanded, err := util.ParseIPSet(dest.IP, nil)
             // Fail closed, if we cant parse it, then we should not allow
             // access.
             if err != nil {
-                continue
+                continue DEST_LOOP
             }

-            if node.IPAddresses.InIPSet(expanded) {
+            if node.InIPSet(expanded) {
                 dests = append(dests, dest)
                 continue DEST_LOOP
             }

             // If the node exposes routes, ensure they are note removed
             // when the filters are reduced.
             if node.Hostinfo != nil {
                 // TODO(kradalby): Evaluate if we should only keep
                 // the routes if the route is enabled. This will
                 // require database access in this part of the code.
                 if len(node.Hostinfo.RoutableIPs) > 0 {
                     for _, routableIP := range node.Hostinfo.RoutableIPs {
-                        if expanded.ContainsPrefix(routableIP) {
+                        if expanded.OverlapsPrefix(routableIP) {
                             dests = append(dests, dest)
                             continue DEST_LOOP
                         }
                     }
                 }
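
The switch from ContainsPrefix to OverlapsPrefix matters when a node advertises a route that is wider than the destination in a rule: the rule's set does not fully contain the route, but they still share addresses. A minimal sketch using only the netipx API:

```go
package main

import (
	"fmt"
	"net/netip"

	"go4.org/netipx"
)

func main() {
	// Destination taken from a filter rule: a single /24.
	var b netipx.IPSetBuilder
	b.AddPrefix(netip.MustParsePrefix("192.0.2.0/24"))
	dest, _ := b.IPSet()

	// Route advertised by the node: a wider /16 that covers the /24.
	route := netip.MustParsePrefix("192.0.0.0/16")

	fmt.Println(dest.ContainsPrefix(route)) // false: the /16 is not fully inside the set
	fmt.Println(dest.OverlapsPrefix(route)) // true: they intersect, so the rule is kept
}
```
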
@@ -279,10 +293,14 @@ func ReduceFilterRules(node *types.Node, rules []tailcfg.FilterRule) []tailcfg.F
     return ret
 }

-func (pol *ACLPolicy) generateSSHRules(
+func (pol *ACLPolicy) CompileSSHPolicy(
     node *types.Node,
     peers types.Nodes,
-) ([]*tailcfg.SSHRule, error) {
+) (*tailcfg.SSHPolicy, error) {
+    if pol == nil {
+        return nil, nil
+    }
+
     rules := []*tailcfg.SSHRule{}

     acceptAction := tailcfg.SSHAction{
@@ -320,7 +338,7 @@ func (pol *ACLPolicy) generateSSHRules(
             return nil, err
         }

-        if !node.IPAddresses.InIPSet(destSet) {
+        if !node.InIPSet(destSet) {
             continue
         }

@@ -331,16 +349,12 @@ func (pol *ACLPolicy) generateSSHRules(
         case "check":
             checkAction, err := sshCheckAction(sshACL.CheckPeriod)
             if err != nil {
-                log.Error().
-                    Msgf("Error parsing SSH %d, check action with unparsable duration '%s'", index, sshACL.CheckPeriod)
+                return nil, fmt.Errorf("parsing SSH policy, parsing check duration, index: %d: %w", index, err)
             } else {
                 action = *checkAction
             }
         default:
-            log.Error().
-                Msgf("Error parsing SSH %d, unknown action '%s', skipping", index, sshACL.Action)
-
-            continue
+            return nil, fmt.Errorf("parsing SSH policy, unknown action %q, index: %d: %w", sshACL.Action, index, err)
         }

         principals := make([]*tailcfg.SSHPrincipal, 0, len(sshACL.Sources))
@@ -352,10 +366,7 @@ func (pol *ACLPolicy) generateSSHRules(
         } else if isGroup(rawSrc) {
             users, err := pol.expandUsersFromGroup(rawSrc)
             if err != nil {
-                log.Error().
-                    Msgf("Error parsing SSH %d, Source %d", index, innerIndex)
-
-                return nil, err
+                return nil, fmt.Errorf("parsing SSH policy, expanding user from group, index: %d->%d: %w", index, innerIndex, err)
             }

             for _, user := range users {
@@ -369,10 +380,7 @@ func (pol *ACLPolicy) generateSSHRules(
                 rawSrc,
             )
             if err != nil {
-                log.Error().
-                    Msgf("Error parsing SSH %d, Source %d", index, innerIndex)
-
-                return nil, err
+                return nil, fmt.Errorf("parsing SSH policy, expanding alias, index: %d->%d: %w", index, innerIndex, err)
             }
             for _, expandedSrc := range expandedSrcs.Prefixes() {
                 principals = append(principals, &tailcfg.SSHPrincipal{
@@ -393,7 +401,9 @@ func (pol *ACLPolicy) generateSSHRules(
             })
         }

-    return rules, nil
+    return &tailcfg.SSHPolicy{
+        Rules: rules,
+    }, nil
 }

 func sshCheckAction(duration string) (*tailcfg.SSHAction, error) {
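
CompileSSHPolicy now returns a whole *tailcfg.SSHPolicy (nil when the policy has no SSH section) instead of a rule slice. A sketch of the assumed caller-side handling, mirroring GenerateFilterAndSSHRulesForTests above:

```go
package main

import (
	"github.com/juanfont/headscale/hscontrol/policy"
	"github.com/juanfont/headscale/hscontrol/types"
	"tailscale.com/tailcfg"
)

// sshPolicyFor sketches the assumed call shape: a nil result simply means
// the ACL policy carries no SSH rules for this node.
func sshPolicyFor(pol *policy.ACLPolicy, node *types.Node, peers types.Nodes) (*tailcfg.SSHPolicy, error) {
	sshPolicy, err := pol.CompileSSHPolicy(node, peers)
	if err != nil {
		return nil, err
	}

	if sshPolicy == nil {
		sshPolicy = &tailcfg.SSHPolicy{}
	}

	return sshPolicy, nil
}

func main() {}
```
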
@@ -502,7 +512,7 @@ func parseProtocol(protocol string) ([]int, bool, error) {
     default:
         protocolNumber, err := strconv.Atoi(protocol)
         if err != nil {
-            return nil, false, err
+            return nil, false, fmt.Errorf("parsing protocol number: %w", err)
         }
         needsWildcard := protocolNumber != protocolTCP &&
             protocolNumber != protocolUDP &&
@@ -539,6 +549,7 @@ func (pol *ACLPolicy) expandSource(
 // - a host
 // - an ip
 // - a cidr
+// - an autogroup
 // and transform these in IPAddresses.
 func (pol *ACLPolicy) ExpandAlias(
     nodes types.Nodes,
@@ -564,6 +575,10 @@ func (pol *ACLPolicy) ExpandAlias(
         return pol.expandIPsFromTag(alias, nodes)
     }

+    if isAutoGroup(alias) {
+        return expandAutoGroup(alias)
+    }
+
     // if alias is a user
     if ips, err := pol.expandIPsFromUser(alias, nodes); ips != nil {
         return ips, err
@@ -766,7 +781,7 @@ func (pol *ACLPolicy) expandIPsFromGroup(
     for _, user := range users {
         filteredNodes := filterNodesByUser(nodes, user)
         for _, node := range filteredNodes {
-            node.IPAddresses.AppendToIPSet(&build)
+            node.AppendToIPSet(&build)
         }
     }

@@ -782,7 +797,7 @@ func (pol *ACLPolicy) expandIPsFromTag(
     // check for forced tags
     for _, node := range nodes {
         if util.StringOrPrefixListContains(node.ForcedTags, alias) {
-            node.IPAddresses.AppendToIPSet(&build)
+            node.AppendToIPSet(&build)
         }
     }

@@ -814,7 +829,7 @@ func (pol *ACLPolicy) expandIPsFromTag(
         }

         if util.StringOrPrefixListContains(node.Hostinfo.RequestTags, alias) {
-            node.IPAddresses.AppendToIPSet(&build)
+            node.AppendToIPSet(&build)
         }
     }
 }
@@ -837,7 +852,7 @@ func (pol *ACLPolicy) expandIPsFromUser(
     }

     for _, node := range filteredNodes {
-        node.IPAddresses.AppendToIPSet(&build)
+        node.AppendToIPSet(&build)
     }

     return build.IPSet()
@@ -855,7 +870,7 @@ func (pol *ACLPolicy) expandIPsFromSingleIP(
     build.Add(ip)

     for _, node := range matches {
-        node.IPAddresses.AppendToIPSet(&build)
+        node.AppendToIPSet(&build)
     }

     return build.IPSet()
@@ -872,11 +887,11 @@ func (pol *ACLPolicy) expandIPsFromIPPrefix(
     // This is suboptimal and quite expensive, but if we only add the prefix, we will miss all the relevant IPv6
     // addresses for the hosts that belong to tailscale. This doesnt really affect stuff like subnet routers.
     for _, node := range nodes {
-        for _, ip := range node.IPAddresses {
+        for _, ip := range node.IPs() {
             // log.Trace().
             //     Msgf("checking if node ip (%s) is part of prefix (%s): %v, is single ip prefix (%v), addr: %s", ip.String(), prefix.String(), prefix.Contains(ip), prefix.IsSingleIP(), prefix.Addr().String())
             if prefix.Contains(ip) {
-                node.IPAddresses.AppendToIPSet(&build)
+                node.AppendToIPSet(&build)
             }
         }
     }
@@ -884,6 +899,16 @@ func (pol *ACLPolicy) expandIPsFromIPPrefix(
     return build.IPSet()
 }

+func expandAutoGroup(alias string) (*netipx.IPSet, error) {
+    switch {
+    case strings.HasPrefix(alias, "autogroup:internet"):
+        return theInternet(), nil
+
+    default:
+        return nil, fmt.Errorf("unknown autogroup %q", alias)
+    }
+}
+
 func isWildcard(str string) bool {
     return str == "*"
 }
@@ -896,6 +921,10 @@ func isTag(str string) bool {
     return strings.HasPrefix(str, "tag:")
 }

+func isAutoGroup(str string) bool {
+    return strings.HasPrefix(str, "autogroup:")
+}
+
 // TagsOfNode will return the tags of the current node.
 // Invalid tags are tags added by a user on a node, and that user doesn't have authority to add this tag.
 // Valid tags are tags added by a user that is allowed in the ACL policy to add this tag.
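
With isAutoGroup and expandAutoGroup in place, "autogroup:internet" becomes a resolvable alias that expands to the theInternet() set added earlier in this diff. A hedged sketch of resolving it through the exported ExpandAlias; the exact parameter list is assumed from the truncated hunk above:

```go
package main

import (
	"fmt"

	"github.com/juanfont/headscale/hscontrol/policy"
	"github.com/juanfont/headscale/hscontrol/types"
)

func main() {
	pol := &policy.ACLPolicy{}

	// Assumed signature: ExpandAlias(nodes, alias). The autogroup branch
	// does not depend on any nodes, so an empty list is enough here.
	ips, err := pol.ExpandAlias(types.Nodes{}, "autogroup:internet")
	if err != nil {
		panic(err)
	}

	// The resulting set is the public address space minus the private,
	// Tailscale and link-local ranges removed by theInternet().
	fmt.Println(len(ips.Prefixes()) > 0) // true
}
```
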
[File diff suppressed because it is too large]
hscontrol/poll.go (1074 changed lines)
[File diff suppressed because it is too large]
@@ -1,96 +0,0 @@
-package hscontrol
-
-import (
-    "encoding/json"
-    "errors"
-    "io"
-    "net/http"
-
-    "github.com/rs/zerolog/log"
-    "gorm.io/gorm"
-    "tailscale.com/tailcfg"
-    "tailscale.com/types/key"
-)
-
-const (
-    MinimumCapVersion tailcfg.CapabilityVersion = 58
-)
-
-// NoisePollNetMapHandler takes care of /machine/:id/map using the Noise protocol
-//
-// This is the busiest endpoint, as it keeps the HTTP long poll that updates
-// the clients when something in the network changes.
-//
-// The clients POST stuff like HostInfo and their Endpoints here, but
-// only after their first request (marked with the ReadOnly field).
-//
-// At this moment the updates are sent in a quite horrendous way, but they kinda work.
-func (ns *noiseServer) NoisePollNetMapHandler(
-    writer http.ResponseWriter,
-    req *http.Request,
-) {
-    log.Trace().
-        Str("handler", "NoisePollNetMap").
-        Msg("PollNetMapHandler called")
-
-    log.Trace().
-        Any("headers", req.Header).
-        Caller().
-        Msg("Headers")
-
-    body, _ := io.ReadAll(req.Body)
-
-    mapRequest := tailcfg.MapRequest{}
-    if err := json.Unmarshal(body, &mapRequest); err != nil {
-        log.Error().
-            Caller().
-            Err(err).
-            Msg("Cannot parse MapRequest")
-        http.Error(writer, "Internal error", http.StatusInternalServerError)
-
-        return
-    }
-
-    // Reject unsupported versions
-    if mapRequest.Version < MinimumCapVersion {
-        log.Info().
-            Caller().
-            Int("min_version", int(MinimumCapVersion)).
-            Int("client_version", int(mapRequest.Version)).
-            Msg("unsupported client connected")
-        http.Error(writer, "Internal error", http.StatusBadRequest)
-
-        return
-    }
-
-    ns.nodeKey = mapRequest.NodeKey
-
-    node, err := ns.headscale.db.GetNodeByAnyKey(
-        ns.conn.Peer(),
-        mapRequest.NodeKey,
-        key.NodePublic{},
-    )
-    if err != nil {
-        if errors.Is(err, gorm.ErrRecordNotFound) {
-            log.Warn().
-                Str("handler", "NoisePollNetMap").
-                Msgf("Ignoring request, cannot find node with key %s", mapRequest.NodeKey.String())
-            http.Error(writer, "Internal error", http.StatusNotFound)
-
-            return
-        }
-        log.Error().
-            Str("handler", "NoisePollNetMap").
-            Msgf("Failed to fetch node from the database with node key: %s", mapRequest.NodeKey.String())
-        http.Error(writer, "Internal error", http.StatusInternalServerError)
-
-        return
-    }
-    log.Debug().
-        Str("handler", "NoisePollNetMap").
-        Str("node", node.Hostname).
-        Int("cap_ver", int(mapRequest.Version)).
-        Msg("A node sending a MapRequest with Noise protocol")
-
-    ns.headscale.handlePoll(writer, req.Context(), node, mapRequest)
-}
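
The removed handler's capability-version gate is small enough to show on its own; a sketch using the constant and comparison from the deleted file, with the HTTP plumbing left out:

```go
package main

import (
	"fmt"

	"tailscale.com/tailcfg"
)

// MinimumCapVersion is taken from the removed file: clients below this
// capability version were rejected before any map processing happened.
const MinimumCapVersion tailcfg.CapabilityVersion = 58

func checkCapVersion(req tailcfg.MapRequest) error {
	if req.Version < MinimumCapVersion {
		return fmt.Errorf("unsupported client: cap version %d < %d", req.Version, MinimumCapVersion)
	}

	return nil
}

func main() {
	fmt.Println(checkCapVersion(tailcfg.MapRequest{Version: 57})) // error
	fmt.Println(checkCapVersion(tailcfg.MapRequest{Version: 90})) // <nil>
}
```
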
@@ -10,6 +10,7 @@ import (
     "time"

     "tailscale.com/tailcfg"
+    "tailscale.com/util/ctxkey"
 )

 const (
@@ -90,6 +91,25 @@ func (i StringList) Value() (driver.Value, error) {

 type StateUpdateType int

+func (su StateUpdateType) String() string {
+    switch su {
+    case StateFullUpdate:
+        return "StateFullUpdate"
+    case StatePeerChanged:
+        return "StatePeerChanged"
+    case StatePeerChangedPatch:
+        return "StatePeerChangedPatch"
+    case StatePeerRemoved:
+        return "StatePeerRemoved"
+    case StateSelfUpdate:
+        return "StateSelfUpdate"
+    case StateDERPUpdated:
+        return "StateDERPUpdated"
+    }
+
+    return "unknown state update type"
+}
+
 const (
     StateFullUpdate StateUpdateType = iota
     // StatePeerChanged is used for updates that needs
@@ -118,7 +138,7 @@ type StateUpdate struct {
     // ChangeNodes must be set when Type is StatePeerAdded
     // and StatePeerChanged and contains the full node
     // object for added nodes.
-    ChangeNodes Nodes
+    ChangeNodes []NodeID

     // ChangePatches must be set when Type is StatePeerChangedPatch
     // and contains a populated PeerChange object.
@@ -127,7 +147,7 @@ type StateUpdate struct {
     // Removed must be set when Type is StatePeerRemoved and
     // contain a list of the nodes that has been removed from
     // the network.
-    Removed []tailcfg.NodeID
+    Removed []NodeID

     // DERPMap must be set when Type is StateDERPUpdated and
     // contain the new DERP Map.
@@ -138,39 +158,6 @@ type StateUpdate struct {
     Message string
 }

-// Valid reports if a StateUpdate is correctly filled and
-// panics if the mandatory fields for a type is not
-// filled.
-// Reports true if valid.
-func (su *StateUpdate) Valid() bool {
-    switch su.Type {
-    case StatePeerChanged:
-        if su.ChangeNodes == nil {
-            panic("Mandatory field ChangeNodes is not set on StatePeerChanged update")
-        }
-    case StatePeerChangedPatch:
-        if su.ChangePatches == nil {
-            panic("Mandatory field ChangePatches is not set on StatePeerChangedPatch update")
-        }
-    case StatePeerRemoved:
-        if su.Removed == nil {
-            panic("Mandatory field Removed is not set on StatePeerRemove update")
-        }
-    case StateSelfUpdate:
-        if su.ChangeNodes == nil || len(su.ChangeNodes) != 1 {
-            panic(
-                "Mandatory field ChangeNodes is not set for StateSelfUpdate or has more than one node",
-            )
-        }
-    case StateDERPUpdated:
-        if su.DERPMap == nil {
-            panic("Mandatory field DERPMap is not set on StateDERPUpdated update")
-        }
-    }
-
-    return true
-}
-
 // Empty reports if there are any updates in the StateUpdate.
 func (su *StateUpdate) Empty() bool {
     switch su.Type {
@@ -185,22 +172,26 @@ func (su *StateUpdate) Empty() bool {
     return false
 }

-func StateUpdateExpire(nodeID uint64, expiry time.Time) StateUpdate {
+func StateUpdateExpire(nodeID NodeID, expiry time.Time) StateUpdate {
     return StateUpdate{
         Type: StatePeerChangedPatch,
         ChangePatches: []*tailcfg.PeerChange{
             {
-                NodeID: tailcfg.NodeID(nodeID),
+                NodeID: nodeID.NodeID(),
                 KeyExpiry: &expiry,
             },
         },
     }
 }

+var (
+    NotifyOriginKey   = ctxkey.New("notify.origin", "")
+    NotifyHostnameKey = ctxkey.New("notify.hostname", "")
+)
+
 func NotifyCtx(ctx context.Context, origin, hostname string) context.Context {
-    ctx2, _ := context.WithTimeout(
-        context.WithValue(context.WithValue(ctx, "hostname", hostname), "origin", origin),
-        3*time.Second,
-    )
+    ctx2, _ := context.WithTimeout(ctx, 3*time.Second)
+    ctx2 = NotifyOriginKey.WithValue(ctx2, origin)
+    ctx2 = NotifyHostnameKey.WithValue(ctx2, hostname)
     return ctx2
 }
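
NotifyCtx now stores origin and hostname under typed keys from tailscale.com/util/ctxkey instead of raw string keys on the context. A small sketch of writing and reading those values back, assuming ctxkey's WithValue/Value accessors; the keys here are local stand-ins for the exported ones in the diff:

```go
package main

import (
	"context"
	"fmt"

	"tailscale.com/util/ctxkey"
)

// Local stand-ins for NotifyOriginKey / NotifyHostnameKey from the diff.
var (
	notifyOriginKey   = ctxkey.New("notify.origin", "")
	notifyHostnameKey = ctxkey.New("notify.hostname", "")
)

func main() {
	ctx := notifyOriginKey.WithValue(context.Background(), "oidc-expiry")
	ctx = notifyHostnameKey.WithValue(ctx, "na")

	// Typed accessors replace ctx.Value("origin").(string) assertions.
	fmt.Println(notifyOriginKey.Value(ctx))   // oidc-expiry
	fmt.Println(notifyHostnameKey.Value(ctx)) // na
}
```
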
Some files were not shown because too many files have changed in this diff.