Compare commits

...

5 commits

Author SHA1 Message Date
Kristoffer Dalby
58b20ea6ea
Merge 50165ce9e1 into e7245856c5 2024-11-13 16:07:56 -08:00
nblock
e7245856c5
Refresh remote CLI documentation (#2216)
* Document to either use a minimal configuration file or environment
  variables to connect with a remote headscale instance.
* Document a workaround specific for headscale 0.23.0.
* Remove reference to ancient headscale version.
* Use `cli.insecure: true` or `HEADSCALE_CLI_INSECURE=1` to skip
  certificate verification.
* Style and typo fixes

Ref: #2193
2024-11-13 18:35:42 +01:00
nblock
2345c38e1e
Add a page for third-party tools (#2217)
* Remove status from web-ui docs

Rename the title to indicate that there are multiple web interfaces
available. Do not track the status of each web interface here as their
status is subject to change over time.

* Add page for third-party tools and scripts
2024-11-12 16:53:30 +01:00
github-actions[bot]
8cfaa6bdac
flake.lock: Update (#2222) 2024-11-12 13:27:49 +00:00
Kristoffer Dalby
50165ce9e1
resolve user identifier to stable ID
Currently, the policy takes a quite naive approach to
node-to-user matching: it looks at the username provided
in the policy and matches it against the username on the
nodes. This worked OK as long as usernames were unique
and did not change.

As usernames are no longer guaranteed to be unique in
an OIDC environment, we can't rely on this.

This changes the mechanism that matches the user string
(now user token) with nodes:

- first find all potential users by looking up:
  - database ID
  - provider ID (OIDC)
  - username/email

If more than one user matches, the query is rejected and
zero matching nodes are returned.

When a single user is found, the node is matched against
the User database ID, which is also present on the actual
node.

This means that from this commit, users can use the following
to identify users in the policy:
- provider identity (iss + sub)
- username
- email
- database id

There are more changes coming to this, so it is not recommended
to start using any of these new abilities, with the exception
of email, which will not change since it includes an @.
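
To make the matching rule above concrete, here is a minimal, self-contained sketch
(not the actual headscale implementation; the real logic lives in `filterNodesByUser`
in the policy package, shown further down in this diff). The `User` struct and the
`resolveUserToken` helper are simplified stand-ins, and the database-ID lookup
mentioned above is omitted:

```go
package main

import "fmt"

// User is a simplified stand-in for headscale's types.User, carrying only
// the fields the matching rule described above looks at.
type User struct {
	ID                 uint
	Name               string
	Email              string
	ProviderIdentifier string
}

// resolveUserToken illustrates the lookup order from the commit message:
// a token may match a user's provider identifier (iss + sub), email or
// username. If the token is ambiguous (matches more than one user), no
// user is returned, so zero nodes will match.
func resolveUserToken(users []User, token string) (User, bool) {
	var matches []User
	for _, u := range users {
		if u.ProviderIdentifier == token || u.Email == token || u.Name == token {
			matches = append(matches, u)
		}
	}
	if len(matches) != 1 {
		return User{}, false
	}
	return matches[0], true
}

func main() {
	users := []User{
		{ID: 1, Name: "mikael", Email: "mikael@headscale.net", ProviderIdentifier: "http://oidc.org/1234"},
		{ID: 2, Name: "mikael2", Email: "mikael@headscale.net"},
	}

	// Unambiguous: only one user has this provider identifier.
	if u, ok := resolveUserToken(users, "http://oidc.org/1234"); ok {
		fmt.Println("matched user ID", u.ID)
	}

	// Ambiguous: two users share this email, so the lookup yields nothing.
	if _, ok := resolveUserToken(users, "mikael@headscale.net"); !ok {
		fmt.Println("ambiguous token, zero nodes returned")
	}
}
```

Resolving the token to a single user first, and then matching nodes by that user's
stable database ID, is what keeps policy evaluation stable when usernames are reused
or change.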

Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
2024-10-23 22:31:37 -05:00
15 changed files with 513 additions and 179 deletions


@ -1,2 +1,3 @@
.github/workflows/test-integration-v2*
docs/about/features.md
docs/ref/remote-cli.md


@ -0,0 +1,12 @@
# Tools related to headscale
!!! warning "Community contributions"
This page contains community contributions. The projects listed here are not
maintained by the headscale authors and are written by community members.
This page collects third-party tools and scripts related to headscale.
| Name | Repository Link | Description |
| ----------------- | --------------------------------------------------------------- | ------------------------------------------------- |
| tailscale-manager | [Github](https://github.com/singlestore-labs/tailscale-manager) | Dynamically manage Tailscale route advertisements |


@ -1,17 +1,19 @@
# Headscale web interface
# Web interfaces for headscale
!!! warning "Community contributions"
This page contains community contributions. The projects listed here are not
maintained by the headscale authors and are written by community members.
| Name | Repository Link | Description | Status |
| --------------- | ------------------------------------------------------- | ----------------------------------------------------------------------------------- | ------ |
| headscale-webui | [Github](https://github.com/ifargle/headscale-webui) | A simple headscale web UI for small-scale deployments. | Alpha |
| headscale-ui | [Github](https://github.com/gurucomputing/headscale-ui) | A web frontend for the headscale Tailscale-compatible coordination server | Alpha |
| HeadscaleUi | [GitHub](https://github.com/simcu/headscale-ui) | A static headscale admin ui, no backend enviroment required | Alpha |
| Headplane | [GitHub](https://github.com/tale/headplane) | An advanced Tailscale inspired frontend for headscale | Alpha |
| headscale-admin | [Github](https://github.com/GoodiesHQ/headscale-admin) | Headscale-Admin is meant to be a simple, modern web interface for headscale | Beta |
| ouroboros | [Github](https://github.com/yellowsink/ouroboros) | Ouroboros is designed for users to manage their own devices, rather than for admins | Stable |
Headscale doesn't provide a built-in web interface but users may pick one from the available options.
| Name | Repository Link | Description |
| --------------- | ------------------------------------------------------- | ----------------------------------------------------------------------------------- |
| headscale-webui | [Github](https://github.com/ifargle/headscale-webui) | A simple headscale web UI for small-scale deployments. |
| headscale-ui | [Github](https://github.com/gurucomputing/headscale-ui) | A web frontend for the headscale Tailscale-compatible coordination server |
| HeadscaleUi | [GitHub](https://github.com/simcu/headscale-ui) | A static headscale admin ui, no backend environment required |
| Headplane | [GitHub](https://github.com/tale/headplane) | An advanced Tailscale inspired frontend for headscale |
| headscale-admin | [Github](https://github.com/GoodiesHQ/headscale-admin) | Headscale-Admin is meant to be a simple, modern web interface for headscale |
| ouroboros | [Github](https://github.com/yellowsink/ouroboros) | Ouroboros is designed for users to manage their own devices, rather than for admins |
You can ask for support on our dedicated [Discord channel](https://discord.com/channels/896711691637780480/1105842846386356294).


@ -1,22 +1,21 @@
# Controlling headscale with remote CLI
This documentation has the goal of showing a user how-to set control a headscale instance
This documentation has the goal of showing a user how to control a headscale instance
from a remote machine with the `headscale` command line binary.
## Prerequisite
- A workstation to run headscale (could be Linux, macOS, other supported platforms)
- A headscale server (version `0.13.0` or newer)
- Access to create API keys (local access to the headscale server)
- headscale _must_ be served over TLS/HTTPS
- Remote access does _not_ support unencrypted traffic.
- Port `50443` must be open in the firewall (or port overridden by `grpc_listen_addr` option)
- A workstation to run `headscale` (any supported platform, e.g. Linux).
- A headscale server with gRPC enabled.
- Connections to the gRPC port (default: `50443`) are allowed.
- Remote access requires an encrypted connection via TLS.
- An API key to authenticate with the headscale server.
## Create an API key
We need to create an API key to authenticate our remote headscale when using it from our workstation.
We need to create an API key to authenticate with the remote headscale server when using it from our workstation.
To create a API key, log into your headscale server and generate a key:
To create an API key, log into your headscale server and generate a key:
```shell
headscale apikeys create --expiration 90d
@ -25,7 +24,7 @@ headscale apikeys create --expiration 90d
Copy the output of the command and save it for later. Please note that you cannot retrieve a key again;
if the key is lost, expire the old one and create a new key.
To list the keys currently assosicated with the server:
To list the keys currently associated with the server:
```shell
headscale apikeys list
@ -39,7 +38,8 @@ headscale apikeys expire --prefix "<PREFIX>"
## Download and configure headscale
1. Download the latest [`headscale` binary from GitHub's release page](https://github.com/juanfont/headscale/releases):
1. Download the [`headscale` binary from GitHub's release page](https://github.com/juanfont/headscale/releases). Make
sure to use the same version as on the server.
1. Put the binary somewhere in your `PATH`, e.g. `/usr/local/bin/headscale`
@ -49,25 +49,32 @@ headscale apikeys expire --prefix "<PREFIX>"
chmod +x /usr/local/bin/headscale
```
1. Configure the CLI through environment variables
1. Provide the connection parameters for the remote headscale server either via a minimal YAML configuration file or via
environment variables:
```shell
export HEADSCALE_CLI_ADDRESS="<HEADSCALE ADDRESS>:<PORT>"
export HEADSCALE_CLI_API_KEY="<API KEY FROM PREVIOUS STAGE>"
=== "Minimal YAML configuration file"
```yaml
cli:
address: <HEADSCALE_ADDRESS>:<PORT>
api_key: <API_KEY_FROM_PREVIOUS_STEP>
```
for example:
=== "Environment variables"
```shell
export HEADSCALE_CLI_ADDRESS="headscale.example.com:50443"
export HEADSCALE_CLI_API_KEY="abcde12345"
export HEADSCALE_CLI_ADDRESS="<HEADSCALE_ADDRESS>:<PORT>"
export HEADSCALE_CLI_API_KEY="<API_KEY_FROM_PREVIOUS_STEP>"
```
This will tell the `headscale` binary to connect to a remote instance, instead of looking
for a local instance (which is what it does on the server).
!!! bug
The API key is needed to make sure that you are allowed to access the server. The key is _not_
needed when running directly on the server, as the connection is local.
Headscale 0.23.0 requires at least an empty configuration file when environment variables are used to
specify connection details. See [issue 2193](https://github.com/juanfont/headscale/issues/2193) for more
information.
This instructs the `headscale` binary to connect to a remote instance at `<HEADSCALE_ADDRESS>:<PORT>`, instead of
connecting to the local instance.
1. Test the connection
@ -89,10 +96,10 @@ While this is _not a supported_ feature, an example on how this can be set up on
## Troubleshooting
Checklist:
- Make sure you have the _same_ headscale version on your server and workstation
- Make sure you use version `0.13.0` or newer.
- Verify that your TLS certificate is valid and trusted
- If you do not have access to a trusted certificate (e.g. from Let's Encrypt), add your self signed certificate to the trust store of your OS or
- Set `HEADSCALE_CLI_INSECURE` to 0 in your environment
- Make sure you have the _same_ headscale version on your server and workstation.
- Ensure that connections to the gRPC port are allowed.
- Verify that your TLS certificate is valid and trusted.
- If you don't have access to a trusted certificate (e.g. from Let's Encrypt), either:
- Add your self-signed certificate to the trust store of your OS _or_
- Disable certificate verification by either setting `cli.insecure: true` in the configuration file or by setting
`HEADSCALE_CLI_INSECURE=1` via an environment variable. We do **not** recommend disabling certificate validation.


@ -28,7 +28,7 @@ development version.
## Fedora, RHEL, CentOS
A 3rd-party repository for various RPM based distributions is available at:
A third-party repository for various RPM based distributions is available at:
<https://copr.fedorainfracloud.org/coprs/jonathanspw/headscale/>. The site provides detailed setup and installation
instructions.


@ -20,11 +20,11 @@
},
"nixpkgs": {
"locked": {
"lastModified": 1729850857,
"narHash": "sha256-WvLXzNNnnw+qpFOmgaM3JUlNEH+T4s22b5i2oyyCpXE=",
"lastModified": 1730958623,
"narHash": "sha256-JwQZIGSYnRNOgDDoIgqKITrPVil+RMWHsZH1eE1VGN0=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "41dea55321e5a999b17033296ac05fe8a8b5a257",
"rev": "85f7e662eda4fa3a995556527c87b2524b691933",
"type": "github"
},
"original": {


@ -1027,14 +1027,18 @@ func (h *Headscale) loadACLPolicy() error {
if err != nil {
return fmt.Errorf("loading nodes from database to validate policy: %w", err)
}
users, err := h.db.ListUsers()
if err != nil {
return fmt.Errorf("loading users from database to validate policy: %w", err)
}
_, err = pol.CompileFilterRules(nodes)
_, err = pol.CompileFilterRules(users, nodes)
if err != nil {
return fmt.Errorf("verifying policy rules: %w", err)
}
if len(nodes) > 0 {
_, err = pol.CompileSSHPolicy(nodes[0], nodes)
_, err = pol.CompileSSHPolicy(nodes[0], users, nodes)
if err != nil {
return fmt.Errorf("verifying SSH rules: %w", err)
}


@ -255,10 +255,10 @@ func (s *Suite) TestGetACLFilteredPeers(c *check.C) {
c.Assert(err, check.IsNil)
c.Assert(len(testPeers), check.Equals, 9)
adminRules, _, err := policy.GenerateFilterAndSSHRulesForTests(aclPolicy, adminNode, adminPeers)
adminRules, _, err := policy.GenerateFilterAndSSHRulesForTests(aclPolicy, adminNode, adminPeers, []types.User{*stor[0].user, *stor[1].user})
c.Assert(err, check.IsNil)
testRules, _, err := policy.GenerateFilterAndSSHRulesForTests(aclPolicy, testNode, testPeers)
testRules, _, err := policy.GenerateFilterAndSSHRulesForTests(aclPolicy, testNode, testPeers, []types.User{*stor[0].user, *stor[1].user})
c.Assert(err, check.IsNil)
peersOfAdminNode := policy.FilterNodesByACL(adminNode, adminPeers, adminRules)


@ -648,8 +648,13 @@ func EnableAutoApprovedRoutes(
if approvedAlias == node.User.Username() {
approvedRoutes = append(approvedRoutes, advertisedRoute)
} else {
users, err := ListUsers(tx)
if err != nil {
return fmt.Errorf("looking up users to expand route alias: %w", err)
}
// TODO(kradalby): figure out how to get this to depend on less stuff
approvedIps, err := aclPolicy.ExpandAlias(types.Nodes{node}, approvedAlias)
approvedIps, err := aclPolicy.ExpandAlias(types.Nodes{node}, users, approvedAlias)
if err != nil {
return fmt.Errorf("expanding alias %q for autoApprovers: %w", approvedAlias, err)
}


@ -737,14 +737,18 @@ func (api headscaleV1APIServer) SetPolicy(
if err != nil {
return nil, fmt.Errorf("loading nodes from database to validate policy: %w", err)
}
users, err := api.h.db.ListUsers()
if err != nil {
return nil, fmt.Errorf("loading users from database to validate policy: %w", err)
}
_, err = pol.CompileFilterRules(nodes)
_, err = pol.CompileFilterRules(users, nodes)
if err != nil {
return nil, fmt.Errorf("verifying policy rules: %w", err)
}
if len(nodes) > 0 {
_, err = pol.CompileSSHPolicy(nodes[0], nodes)
_, err = pol.CompileSSHPolicy(nodes[0], users, nodes)
if err != nil {
return nil, fmt.Errorf("verifying SSH rules: %w", err)
}


@ -153,6 +153,7 @@ func addNextDNSMetadata(resolvers []*dnstype.Resolver, node *types.Node) {
func (m *Mapper) fullMapResponse(
node *types.Node,
peers types.Nodes,
users []types.User,
pol *policy.ACLPolicy,
capVer tailcfg.CapabilityVersion,
) (*tailcfg.MapResponse, error) {
@ -167,6 +168,7 @@ func (m *Mapper) fullMapResponse(
pol,
node,
capVer,
users,
peers,
peers,
m.cfg,
@ -189,8 +191,12 @@ func (m *Mapper) FullMapResponse(
if err != nil {
return nil, err
}
users, err := m.db.ListUsers()
if err != nil {
return nil, err
}
resp, err := m.fullMapResponse(node, peers, pol, mapRequest.Version)
resp, err := m.fullMapResponse(node, peers, users, pol, mapRequest.Version)
if err != nil {
return nil, err
}
@ -253,6 +259,11 @@ func (m *Mapper) PeerChangedResponse(
return nil, err
}
users, err := m.db.ListUsers()
if err != nil {
return nil, fmt.Errorf("listing users for map response: %w", err)
}
var removedIDs []tailcfg.NodeID
var changedIDs []types.NodeID
for nodeID, nodeChanged := range changed {
@ -276,6 +287,7 @@ func (m *Mapper) PeerChangedResponse(
pol,
node,
mapRequest.Version,
users,
peers,
changedNodes,
m.cfg,
@ -508,16 +520,17 @@ func appendPeerChanges(
pol *policy.ACLPolicy,
node *types.Node,
capVer tailcfg.CapabilityVersion,
users []types.User,
peers types.Nodes,
changed types.Nodes,
cfg *types.Config,
) error {
packetFilter, err := pol.CompileFilterRules(append(peers, node))
packetFilter, err := pol.CompileFilterRules(users, append(peers, node))
if err != nil {
return err
}
sshPolicy, err := pol.CompileSSHPolicy(node, peers)
sshPolicy, err := pol.CompileSSHPolicy(node, users, peers)
if err != nil {
return err
}


@ -159,6 +159,9 @@ func Test_fullMapResponse(t *testing.T) {
lastSeen := time.Date(2009, time.November, 10, 23, 9, 0, 0, time.UTC)
expire := time.Date(2500, time.November, 11, 23, 0, 0, 0, time.UTC)
user1 := types.User{Model: gorm.Model{ID: 0}, Name: "mini"}
user2 := types.User{Model: gorm.Model{ID: 1}, Name: "peer2"}
mini := &types.Node{
ID: 0,
MachineKey: mustMK(
@ -173,8 +176,8 @@ func Test_fullMapResponse(t *testing.T) {
IPv4: iap("100.64.0.1"),
Hostname: "mini",
GivenName: "mini",
UserID: 0,
User: types.User{Name: "mini"},
UserID: user1.ID,
User: user1,
ForcedTags: []string{},
AuthKey: &types.PreAuthKey{},
LastSeen: &lastSeen,
@ -253,8 +256,8 @@ func Test_fullMapResponse(t *testing.T) {
IPv4: iap("100.64.0.2"),
Hostname: "peer1",
GivenName: "peer1",
UserID: 0,
User: types.User{Name: "mini"},
UserID: user1.ID,
User: user1,
ForcedTags: []string{},
LastSeen: &lastSeen,
Expiry: &expire,
@ -308,8 +311,8 @@ func Test_fullMapResponse(t *testing.T) {
IPv4: iap("100.64.0.3"),
Hostname: "peer2",
GivenName: "peer2",
UserID: 1,
User: types.User{Name: "peer2"},
UserID: user2.ID,
User: user2,
ForcedTags: []string{},
LastSeen: &lastSeen,
Expiry: &expire,
@ -468,6 +471,7 @@ func Test_fullMapResponse(t *testing.T) {
got, err := mappy.fullMapResponse(
tt.node,
tt.peers,
[]types.User{user1, user2},
tt.pol,
0,
)


@ -137,20 +137,21 @@ func GenerateFilterAndSSHRulesForTests(
policy *ACLPolicy,
node *types.Node,
peers types.Nodes,
users []types.User,
) ([]tailcfg.FilterRule, *tailcfg.SSHPolicy, error) {
// If there is no policy defined, we default to allow all
if policy == nil {
return tailcfg.FilterAllowAll, &tailcfg.SSHPolicy{}, nil
}
rules, err := policy.CompileFilterRules(append(peers, node))
rules, err := policy.CompileFilterRules(users, append(peers, node))
if err != nil {
return []tailcfg.FilterRule{}, &tailcfg.SSHPolicy{}, err
}
log.Trace().Interface("ACL", rules).Str("node", node.GivenName).Msg("ACL rules")
sshPolicy, err := policy.CompileSSHPolicy(node, peers)
sshPolicy, err := policy.CompileSSHPolicy(node, users, peers)
if err != nil {
return []tailcfg.FilterRule{}, &tailcfg.SSHPolicy{}, err
}
@ -161,6 +162,7 @@ func GenerateFilterAndSSHRulesForTests(
// CompileFilterRules takes a set of nodes and an ACLPolicy and generates a
// set of Tailscale compatible FilterRules used to allow traffic on clients.
func (pol *ACLPolicy) CompileFilterRules(
users []types.User,
nodes types.Nodes,
) ([]tailcfg.FilterRule, error) {
if pol == nil {
@ -176,9 +178,14 @@ func (pol *ACLPolicy) CompileFilterRules(
var srcIPs []string
for srcIndex, src := range acl.Sources {
srcs, err := pol.expandSource(src, nodes)
srcs, err := pol.expandSource(src, users, nodes)
if err != nil {
return nil, fmt.Errorf("parsing policy, acl index: %d->%d: %w", index, srcIndex, err)
return nil, fmt.Errorf(
"parsing policy, acl index: %d->%d: %w",
index,
srcIndex,
err,
)
}
srcIPs = append(srcIPs, srcs...)
}
@ -197,6 +204,7 @@ func (pol *ACLPolicy) CompileFilterRules(
expanded, err := pol.ExpandAlias(
nodes,
users,
alias,
)
if err != nil {
@ -281,6 +289,7 @@ func ReduceFilterRules(node *types.Node, rules []tailcfg.FilterRule) []tailcfg.F
func (pol *ACLPolicy) CompileSSHPolicy(
node *types.Node,
users []types.User,
peers types.Nodes,
) (*tailcfg.SSHPolicy, error) {
if pol == nil {
@ -312,7 +321,7 @@ func (pol *ACLPolicy) CompileSSHPolicy(
for index, sshACL := range pol.SSHs {
var dest netipx.IPSetBuilder
for _, src := range sshACL.Destinations {
expanded, err := pol.ExpandAlias(append(peers, node), src)
expanded, err := pol.ExpandAlias(append(peers, node), users, src)
if err != nil {
return nil, err
}
@ -335,12 +344,21 @@ func (pol *ACLPolicy) CompileSSHPolicy(
case "check":
checkAction, err := sshCheckAction(sshACL.CheckPeriod)
if err != nil {
return nil, fmt.Errorf("parsing SSH policy, parsing check duration, index: %d: %w", index, err)
return nil, fmt.Errorf(
"parsing SSH policy, parsing check duration, index: %d: %w",
index,
err,
)
} else {
action = *checkAction
}
default:
return nil, fmt.Errorf("parsing SSH policy, unknown action %q, index: %d: %w", sshACL.Action, index, err)
return nil, fmt.Errorf(
"parsing SSH policy, unknown action %q, index: %d: %w",
sshACL.Action,
index,
err,
)
}
principals := make([]*tailcfg.SSHPrincipal, 0, len(sshACL.Sources))
@ -363,6 +381,7 @@ func (pol *ACLPolicy) CompileSSHPolicy(
} else {
expandedSrcs, err := pol.ExpandAlias(
peers,
users,
rawSrc,
)
if err != nil {
@ -512,9 +531,10 @@ func parseProtocol(protocol string) ([]int, bool, error) {
// with the given src alias.
func (pol *ACLPolicy) expandSource(
src string,
users []types.User,
nodes types.Nodes,
) ([]string, error) {
ipSet, err := pol.ExpandAlias(nodes, src)
ipSet, err := pol.ExpandAlias(nodes, users, src)
if err != nil {
return []string{}, err
}
@ -538,6 +558,7 @@ func (pol *ACLPolicy) expandSource(
// and transform these in IPAddresses.
func (pol *ACLPolicy) ExpandAlias(
nodes types.Nodes,
users []types.User,
alias string,
) (*netipx.IPSet, error) {
if isWildcard(alias) {
@ -552,12 +573,12 @@ func (pol *ACLPolicy) ExpandAlias(
// if alias is a group
if isGroup(alias) {
return pol.expandIPsFromGroup(alias, nodes)
return pol.expandIPsFromGroup(alias, users, nodes)
}
// if alias is a tag
if isTag(alias) {
return pol.expandIPsFromTag(alias, nodes)
return pol.expandIPsFromTag(alias, users, nodes)
}
if isAutoGroup(alias) {
@ -565,7 +586,7 @@ func (pol *ACLPolicy) ExpandAlias(
}
// if alias is a user
if ips, err := pol.expandIPsFromUser(alias, nodes); ips != nil {
if ips, err := pol.expandIPsFromUser(alias, users, nodes); ips != nil {
return ips, err
}
@ -574,7 +595,7 @@ func (pol *ACLPolicy) ExpandAlias(
if h, ok := pol.Hosts[alias]; ok {
log.Trace().Str("host", h.String()).Msg("ExpandAlias got hosts entry")
return pol.ExpandAlias(nodes, h.String())
return pol.ExpandAlias(nodes, users, h.String())
}
// if alias is an IP
@ -751,16 +772,17 @@ func (pol *ACLPolicy) expandUsersFromGroup(
func (pol *ACLPolicy) expandIPsFromGroup(
group string,
users []types.User,
nodes types.Nodes,
) (*netipx.IPSet, error) {
var build netipx.IPSetBuilder
users, err := pol.expandUsersFromGroup(group)
userTokens, err := pol.expandUsersFromGroup(group)
if err != nil {
return &netipx.IPSet{}, err
}
for _, user := range users {
filteredNodes := filterNodesByUser(nodes, user)
for _, user := range userTokens {
filteredNodes := filterNodesByUser(nodes, users, user)
for _, node := range filteredNodes {
node.AppendToIPSet(&build)
}
@ -771,6 +793,7 @@ func (pol *ACLPolicy) expandIPsFromGroup(
func (pol *ACLPolicy) expandIPsFromTag(
alias string,
users []types.User,
nodes types.Nodes,
) (*netipx.IPSet, error) {
var build netipx.IPSetBuilder
@ -803,7 +826,7 @@ func (pol *ACLPolicy) expandIPsFromTag(
// filter out nodes per tag owner
for _, user := range owners {
nodes := filterNodesByUser(nodes, user)
nodes := filterNodesByUser(nodes, users, user)
for _, node := range nodes {
if node.Hostinfo == nil {
continue
@ -820,11 +843,12 @@ func (pol *ACLPolicy) expandIPsFromTag(
func (pol *ACLPolicy) expandIPsFromUser(
user string,
users []types.User,
nodes types.Nodes,
) (*netipx.IPSet, error) {
var build netipx.IPSetBuilder
filteredNodes := filterNodesByUser(nodes, user)
filteredNodes := filterNodesByUser(nodes, users, user)
filteredNodes = excludeCorrectlyTaggedNodes(pol, filteredNodes, user)
// shortcircuit if we have no nodes to get ips from.
@ -953,10 +977,40 @@ func (pol *ACLPolicy) TagsOfNode(
return validTags, invalidTags
}
func filterNodesByUser(nodes types.Nodes, user string) types.Nodes {
// filterNodesByUser returns a list of nodes that match the given userToken from a
// policy.
// Matching nodes are determined by first matching the user token to a user by checking:
// - If it is an ID that matches the user database ID
// - It is the Provider Identifier from OIDC
// - It matches the username or email of a user
//
// If the token matches more than one user, zero nodes will be returned.
func filterNodesByUser(nodes types.Nodes, users []types.User, userToken string) types.Nodes {
var out types.Nodes
var potentialUsers []types.User
for _, user := range users {
if user.ProviderIdentifier == userToken {
potentialUsers = append(potentialUsers, user)
break
}
if user.Email == userToken {
potentialUsers = append(potentialUsers, user)
}
if user.Name == userToken {
potentialUsers = append(potentialUsers, user)
}
}
if len(potentialUsers) != 1 {
return nil
}
user := potentialUsers[0]
for _, node := range nodes {
if node.User.Username() == user {
if node.User.ID == user.ID {
out = append(out, node)
}
}


@ -2,8 +2,10 @@ package policy
import (
"errors"
"math/rand/v2"
"net/netip"
"slices"
"sort"
"testing"
"github.com/google/go-cmp/cmp"
@ -14,6 +16,7 @@ import (
"github.com/stretchr/testify/assert"
"go4.org/netipx"
"gopkg.in/check.v1"
"gorm.io/gorm"
"tailscale.com/net/tsaddr"
"tailscale.com/tailcfg"
)
@ -375,15 +378,21 @@ func TestParsing(t *testing.T) {
return
}
rules, err := pol.CompileFilterRules(types.Nodes{
user := types.User{
Model: gorm.Model{ID: 1},
Name: "testuser",
}
rules, err := pol.CompileFilterRules(
[]types.User{
user,
},
types.Nodes{
&types.Node{
IPv4: iap("100.100.100.100"),
},
&types.Node{
IPv4: iap("200.200.200.200"),
User: types.User{
Name: "testuser",
},
User: user,
Hostinfo: &tailcfg.Hostinfo{},
},
})
@ -533,7 +542,7 @@ func (s *Suite) TestRuleInvalidGeneration(c *check.C) {
c.Assert(pol.ACLs, check.HasLen, 6)
c.Assert(err, check.IsNil)
rules, err := pol.CompileFilterRules(types.Nodes{})
rules, err := pol.CompileFilterRules([]types.User{}, types.Nodes{})
c.Assert(err, check.NotNil)
c.Assert(rules, check.IsNil)
}
@ -549,7 +558,7 @@ func (s *Suite) TestInvalidAction(c *check.C) {
},
},
}
_, _, err := GenerateFilterAndSSHRulesForTests(pol, &types.Node{}, types.Nodes{})
_, _, err := GenerateFilterAndSSHRulesForTests(pol, &types.Node{}, types.Nodes{}, []types.User{})
c.Assert(errors.Is(err, ErrInvalidAction), check.Equals, true)
}
@ -568,7 +577,7 @@ func (s *Suite) TestInvalidGroupInGroup(c *check.C) {
},
},
}
_, _, err := GenerateFilterAndSSHRulesForTests(pol, &types.Node{}, types.Nodes{})
_, _, err := GenerateFilterAndSSHRulesForTests(pol, &types.Node{}, types.Nodes{}, []types.User{})
c.Assert(errors.Is(err, ErrInvalidGroup), check.Equals, true)
}
@ -584,7 +593,7 @@ func (s *Suite) TestInvalidTagOwners(c *check.C) {
},
}
_, _, err := GenerateFilterAndSSHRulesForTests(pol, &types.Node{}, types.Nodes{})
_, _, err := GenerateFilterAndSSHRulesForTests(pol, &types.Node{}, types.Nodes{}, []types.User{})
c.Assert(errors.Is(err, ErrInvalidTag), check.Equals, true)
}
@ -861,6 +870,14 @@ func Test_expandPorts(t *testing.T) {
}
func Test_listNodesInUser(t *testing.T) {
users := []types.User{
{Model: gorm.Model{ID: 1}, Name: "marc"},
{Model: gorm.Model{ID: 2}, Name: "joe", Email: "joe@headscale.net"},
{Model: gorm.Model{ID: 3}, Name: "mikael", Email: "mikael@headscale.net", ProviderIdentifier: "http://oidc.org/1234"},
{Model: gorm.Model{ID: 4}, Name: "mikael2", Email: "mikael@headscale.net"},
{Model: gorm.Model{ID: 5}, Name: "mikael", Email: "mikael2@headscale.net"},
}
type args struct {
nodes types.Nodes
user string
@ -874,50 +891,239 @@ func Test_listNodesInUser(t *testing.T) {
name: "1 node in user",
args: args{
nodes: types.Nodes{
&types.Node{User: types.User{Name: "joe"}},
&types.Node{User: users[1]},
},
user: "joe",
},
want: types.Nodes{
&types.Node{User: types.User{Name: "joe"}},
&types.Node{User: users[1]},
},
},
{
name: "3 nodes, 2 in user",
args: args{
nodes: types.Nodes{
&types.Node{ID: 1, User: types.User{Name: "joe"}},
&types.Node{ID: 2, User: types.User{Name: "marc"}},
&types.Node{ID: 3, User: types.User{Name: "marc"}},
&types.Node{ID: 1, User: users[1]},
&types.Node{ID: 2, User: users[0]},
&types.Node{ID: 3, User: users[0]},
},
user: "marc",
},
want: types.Nodes{
&types.Node{ID: 2, User: types.User{Name: "marc"}},
&types.Node{ID: 3, User: types.User{Name: "marc"}},
&types.Node{ID: 2, User: users[0]},
&types.Node{ID: 3, User: users[0]},
},
},
{
name: "5 nodes, 0 in user",
args: args{
nodes: types.Nodes{
&types.Node{ID: 1, User: types.User{Name: "joe"}},
&types.Node{ID: 2, User: types.User{Name: "marc"}},
&types.Node{ID: 3, User: types.User{Name: "marc"}},
&types.Node{ID: 4, User: types.User{Name: "marc"}},
&types.Node{ID: 5, User: types.User{Name: "marc"}},
&types.Node{ID: 1, User: users[1]},
&types.Node{ID: 2, User: users[0]},
&types.Node{ID: 3, User: users[0]},
&types.Node{ID: 4, User: users[0]},
&types.Node{ID: 5, User: users[0]},
},
user: "mickael",
},
want: nil,
},
{
name: "match-by-provider-ident",
args: args{
nodes: types.Nodes{
&types.Node{ID: 1, User: users[1]},
&types.Node{ID: 2, User: users[2]},
},
user: "http://oidc.org/1234",
},
want: types.Nodes{
&types.Node{ID: 2, User: users[2]},
},
},
{
name: "match-by-email",
args: args{
nodes: types.Nodes{
&types.Node{ID: 1, User: users[1]},
&types.Node{ID: 2, User: users[2]},
},
user: "joe@headscale.net",
},
want: types.Nodes{
&types.Node{ID: 1, User: users[1]},
},
},
{
name: "multi-match-is-zero",
args: args{
nodes: types.Nodes{
&types.Node{ID: 1, User: users[1]},
&types.Node{ID: 2, User: users[2]},
&types.Node{ID: 3, User: users[3]},
},
user: "mikael@headscale.net",
},
want: nil,
},
{
name: "multi-email-first-match-is-zero",
args: args{
nodes: types.Nodes{
// First match email, then provider id
&types.Node{ID: 3, User: users[3]},
&types.Node{ID: 2, User: users[2]},
},
user: "mikael@headscale.net",
},
want: nil,
},
{
name: "multi-username-first-match-is-zero",
args: args{
nodes: types.Nodes{
// First match username, then provider id
&types.Node{ID: 4, User: users[3]},
&types.Node{ID: 2, User: users[2]},
},
user: "mikael",
},
want: nil,
},
{
name: "all-users-duplicate-username-random-order",
args: args{
nodes: types.Nodes{
&types.Node{ID: 1, User: users[0]},
&types.Node{ID: 2, User: users[1]},
&types.Node{ID: 3, User: users[2]},
&types.Node{ID: 4, User: users[3]},
&types.Node{ID: 5, User: users[4]},
},
user: "mikael",
},
want: nil,
},
{
name: "all-users-unique-username-random-order",
args: args{
nodes: types.Nodes{
&types.Node{ID: 1, User: users[0]},
&types.Node{ID: 2, User: users[1]},
&types.Node{ID: 3, User: users[2]},
&types.Node{ID: 4, User: users[3]},
&types.Node{ID: 5, User: users[4]},
},
user: "marc",
},
want: types.Nodes{
&types.Node{ID: 1, User: users[0]},
},
},
{
name: "all-users-no-username-random-order",
args: args{
nodes: types.Nodes{
&types.Node{ID: 1, User: users[0]},
&types.Node{ID: 2, User: users[1]},
&types.Node{ID: 3, User: users[2]},
&types.Node{ID: 4, User: users[3]},
&types.Node{ID: 5, User: users[4]},
},
user: "not-working",
},
want: nil,
},
{
name: "all-users-duplicate-email-random-order",
args: args{
nodes: types.Nodes{
&types.Node{ID: 1, User: users[0]},
&types.Node{ID: 2, User: users[1]},
&types.Node{ID: 3, User: users[2]},
&types.Node{ID: 4, User: users[3]},
&types.Node{ID: 5, User: users[4]},
},
user: "mikael@headscale.net",
},
want: nil,
},
{
name: "all-users-duplicate-email-random-order",
args: args{
nodes: types.Nodes{
&types.Node{ID: 1, User: users[0]},
&types.Node{ID: 2, User: users[1]},
&types.Node{ID: 3, User: users[2]},
&types.Node{ID: 4, User: users[3]},
&types.Node{ID: 5, User: users[4]},
},
user: "joe@headscale.net",
},
want: types.Nodes{
&types.Node{ID: 2, User: users[1]},
},
},
{
name: "all-users-no-email-random-order",
args: args{
nodes: types.Nodes{
&types.Node{ID: 1, User: users[0]},
&types.Node{ID: 2, User: users[1]},
&types.Node{ID: 3, User: users[2]},
&types.Node{ID: 4, User: users[3]},
&types.Node{ID: 5, User: users[4]},
},
user: "not-working@headscale.net",
},
want: nil,
},
{
name: "all-users-provider-id-random-order",
args: args{
nodes: types.Nodes{
&types.Node{ID: 1, User: users[0]},
&types.Node{ID: 2, User: users[1]},
&types.Node{ID: 3, User: users[2]},
&types.Node{ID: 4, User: users[3]},
&types.Node{ID: 5, User: users[4]},
},
user: "http://oidc.org/1234",
},
want: types.Nodes{
&types.Node{ID: 3, User: users[2]},
},
},
{
name: "all-users-no-provider-id-random-order",
args: args{
nodes: types.Nodes{
&types.Node{ID: 1, User: users[0]},
&types.Node{ID: 2, User: users[1]},
&types.Node{ID: 3, User: users[2]},
&types.Node{ID: 4, User: users[3]},
&types.Node{ID: 5, User: users[4]},
},
user: "http://oidc.org/4321",
},
want: nil,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
got := filterNodesByUser(test.args.nodes, test.args.user)
for range 1000 {
ns := test.args.nodes
rand.Shuffle(len(ns), func(i, j int) {
ns[i], ns[j] = ns[j], ns[i]
})
got := filterNodesByUser(ns, users, test.args.user)
sort.Slice(got, func(i, j int) bool {
return got[i].ID < got[j].ID
})
if diff := cmp.Diff(test.want, got, util.Comparers...); diff != "" {
t.Errorf("listNodesInUser() = (-want +got):\n%s", diff)
t.Errorf("filterNodesByUser() = (-want +got):\n%s", diff)
}
}
})
}
@ -940,6 +1146,12 @@ func Test_expandAlias(t *testing.T) {
return s
}
users := []types.User{
{Model: gorm.Model{ID: 1}, Name: "joe"},
{Model: gorm.Model{ID: 2}, Name: "marc"},
{Model: gorm.Model{ID: 3}, Name: "mickael"},
}
type field struct {
pol ACLPolicy
}
@ -989,19 +1201,19 @@ func Test_expandAlias(t *testing.T) {
nodes: types.Nodes{
&types.Node{
IPv4: iap("100.64.0.1"),
User: types.User{Name: "joe"},
User: users[0],
},
&types.Node{
IPv4: iap("100.64.0.2"),
User: types.User{Name: "joe"},
User: users[0],
},
&types.Node{
IPv4: iap("100.64.0.3"),
User: types.User{Name: "marc"},
User: users[1],
},
&types.Node{
IPv4: iap("100.64.0.4"),
User: types.User{Name: "mickael"},
User: users[2],
},
},
},
@ -1022,19 +1234,19 @@ func Test_expandAlias(t *testing.T) {
nodes: types.Nodes{
&types.Node{
IPv4: iap("100.64.0.1"),
User: types.User{Name: "joe"},
User: users[0],
},
&types.Node{
IPv4: iap("100.64.0.2"),
User: types.User{Name: "joe"},
User: users[0],
},
&types.Node{
IPv4: iap("100.64.0.3"),
User: types.User{Name: "marc"},
User: users[1],
},
&types.Node{
IPv4: iap("100.64.0.4"),
User: types.User{Name: "mickael"},
User: users[2],
},
},
},
@ -1185,7 +1397,7 @@ func Test_expandAlias(t *testing.T) {
nodes: types.Nodes{
&types.Node{
IPv4: iap("100.64.0.1"),
User: types.User{Name: "joe"},
User: users[0],
Hostinfo: &tailcfg.Hostinfo{
OS: "centos",
Hostname: "foo",
@ -1194,7 +1406,7 @@ func Test_expandAlias(t *testing.T) {
},
&types.Node{
IPv4: iap("100.64.0.2"),
User: types.User{Name: "joe"},
User: users[0],
Hostinfo: &tailcfg.Hostinfo{
OS: "centos",
Hostname: "foo",
@ -1203,11 +1415,11 @@ func Test_expandAlias(t *testing.T) {
},
&types.Node{
IPv4: iap("100.64.0.3"),
User: types.User{Name: "marc"},
User: users[1],
},
&types.Node{
IPv4: iap("100.64.0.4"),
User: types.User{Name: "joe"},
User: users[0],
},
},
},
@ -1260,21 +1472,21 @@ func Test_expandAlias(t *testing.T) {
nodes: types.Nodes{
&types.Node{
IPv4: iap("100.64.0.1"),
User: types.User{Name: "joe"},
User: users[0],
ForcedTags: []string{"tag:hr-webserver"},
},
&types.Node{
IPv4: iap("100.64.0.2"),
User: types.User{Name: "joe"},
User: users[0],
ForcedTags: []string{"tag:hr-webserver"},
},
&types.Node{
IPv4: iap("100.64.0.3"),
User: types.User{Name: "marc"},
User: users[1],
},
&types.Node{
IPv4: iap("100.64.0.4"),
User: types.User{Name: "mickael"},
User: users[2],
},
},
},
@ -1295,12 +1507,12 @@ func Test_expandAlias(t *testing.T) {
nodes: types.Nodes{
&types.Node{
IPv4: iap("100.64.0.1"),
User: types.User{Name: "joe"},
User: users[0],
ForcedTags: []string{"tag:hr-webserver"},
},
&types.Node{
IPv4: iap("100.64.0.2"),
User: types.User{Name: "joe"},
User: users[0],
Hostinfo: &tailcfg.Hostinfo{
OS: "centos",
Hostname: "foo",
@ -1309,11 +1521,11 @@ func Test_expandAlias(t *testing.T) {
},
&types.Node{
IPv4: iap("100.64.0.3"),
User: types.User{Name: "marc"},
User: users[1],
},
&types.Node{
IPv4: iap("100.64.0.4"),
User: types.User{Name: "mickael"},
User: users[2],
},
},
},
@ -1350,12 +1562,12 @@ func Test_expandAlias(t *testing.T) {
},
&types.Node{
IPv4: iap("100.64.0.3"),
User: types.User{Name: "marc"},
User: users[1],
Hostinfo: &tailcfg.Hostinfo{},
},
&types.Node{
IPv4: iap("100.64.0.4"),
User: types.User{Name: "joe"},
User: users[0],
Hostinfo: &tailcfg.Hostinfo{},
},
},
@ -1368,6 +1580,7 @@ func Test_expandAlias(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
got, err := test.field.pol.ExpandAlias(
test.args.nodes,
users,
test.args.alias,
)
if (err != nil) != test.wantErr {
@ -1715,6 +1928,7 @@ func TestACLPolicy_generateFilterRules(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := tt.field.pol.CompileFilterRules(
[]types.User{},
tt.args.nodes,
)
if (err != nil) != tt.wantErr {
@ -1834,6 +2048,13 @@ func TestTheInternet(t *testing.T) {
}
func TestReduceFilterRules(t *testing.T) {
users := []types.User{
{Model: gorm.Model{ID: 1}, Name: "mickael"},
{Model: gorm.Model{ID: 2}, Name: "user1"},
{Model: gorm.Model{ID: 3}, Name: "user2"},
{Model: gorm.Model{ID: 4}, Name: "user100"},
}
tests := []struct {
name string
node *types.Node
@ -1855,13 +2076,13 @@ func TestReduceFilterRules(t *testing.T) {
node: &types.Node{
IPv4: iap("100.64.0.1"),
IPv6: iap("fd7a:115c:a1e0:ab12:4843:2222:6273:2221"),
User: types.User{Name: "mickael"},
User: users[0],
},
peers: types.Nodes{
&types.Node{
IPv4: iap("100.64.0.2"),
IPv6: iap("fd7a:115c:a1e0:ab12:4843:2222:6273:2222"),
User: types.User{Name: "mickael"},
User: users[0],
},
},
want: []tailcfg.FilterRule{},
@ -1888,7 +2109,7 @@ func TestReduceFilterRules(t *testing.T) {
node: &types.Node{
IPv4: iap("100.64.0.1"),
IPv6: iap("fd7a:115c:a1e0::1"),
User: types.User{Name: "user1"},
User: users[1],
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: []netip.Prefix{
netip.MustParsePrefix("10.33.0.0/16"),
@ -1899,7 +2120,7 @@ func TestReduceFilterRules(t *testing.T) {
&types.Node{
IPv4: iap("100.64.0.2"),
IPv6: iap("fd7a:115c:a1e0::2"),
User: types.User{Name: "user1"},
User: users[1],
},
},
want: []tailcfg.FilterRule{
@ -1967,19 +2188,19 @@ func TestReduceFilterRules(t *testing.T) {
node: &types.Node{
IPv4: iap("100.64.0.1"),
IPv6: iap("fd7a:115c:a1e0::1"),
User: types.User{Name: "user1"},
User: users[1],
},
peers: types.Nodes{
&types.Node{
IPv4: iap("100.64.0.2"),
IPv6: iap("fd7a:115c:a1e0::2"),
User: types.User{Name: "user2"},
User: users[2],
},
// "internal" exit node
&types.Node{
IPv4: iap("100.64.0.100"),
IPv6: iap("fd7a:115c:a1e0::100"),
User: types.User{Name: "user100"},
User: users[3],
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: tsaddr.ExitRoutes(),
},
@ -2026,12 +2247,12 @@ func TestReduceFilterRules(t *testing.T) {
&types.Node{
IPv4: iap("100.64.0.2"),
IPv6: iap("fd7a:115c:a1e0::2"),
User: types.User{Name: "user2"},
User: users[2],
},
&types.Node{
IPv4: iap("100.64.0.1"),
IPv6: iap("fd7a:115c:a1e0::1"),
User: types.User{Name: "user1"},
User: users[1],
},
},
want: []tailcfg.FilterRule{
@ -2113,7 +2334,7 @@ func TestReduceFilterRules(t *testing.T) {
node: &types.Node{
IPv4: iap("100.64.0.100"),
IPv6: iap("fd7a:115c:a1e0::100"),
User: types.User{Name: "user100"},
User: users[3],
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: tsaddr.ExitRoutes(),
},
@ -2122,12 +2343,12 @@ func TestReduceFilterRules(t *testing.T) {
&types.Node{
IPv4: iap("100.64.0.2"),
IPv6: iap("fd7a:115c:a1e0::2"),
User: types.User{Name: "user2"},
User: users[2],
},
&types.Node{
IPv4: iap("100.64.0.1"),
IPv6: iap("fd7a:115c:a1e0::1"),
User: types.User{Name: "user1"},
User: users[1],
},
},
want: []tailcfg.FilterRule{
@ -2215,7 +2436,7 @@ func TestReduceFilterRules(t *testing.T) {
node: &types.Node{
IPv4: iap("100.64.0.100"),
IPv6: iap("fd7a:115c:a1e0::100"),
User: types.User{Name: "user100"},
User: users[3],
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: []netip.Prefix{netip.MustParsePrefix("8.0.0.0/16"), netip.MustParsePrefix("16.0.0.0/16")},
},
@ -2224,12 +2445,12 @@ func TestReduceFilterRules(t *testing.T) {
&types.Node{
IPv4: iap("100.64.0.2"),
IPv6: iap("fd7a:115c:a1e0::2"),
User: types.User{Name: "user2"},
User: users[2],
},
&types.Node{
IPv4: iap("100.64.0.1"),
IPv6: iap("fd7a:115c:a1e0::1"),
User: types.User{Name: "user1"},
User: users[1],
},
},
want: []tailcfg.FilterRule{
@ -2292,7 +2513,7 @@ func TestReduceFilterRules(t *testing.T) {
node: &types.Node{
IPv4: iap("100.64.0.100"),
IPv6: iap("fd7a:115c:a1e0::100"),
User: types.User{Name: "user100"},
User: users[3],
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: []netip.Prefix{netip.MustParsePrefix("8.0.0.0/8"), netip.MustParsePrefix("16.0.0.0/8")},
},
@ -2301,12 +2522,12 @@ func TestReduceFilterRules(t *testing.T) {
&types.Node{
IPv4: iap("100.64.0.2"),
IPv6: iap("fd7a:115c:a1e0::2"),
User: types.User{Name: "user2"},
User: users[2],
},
&types.Node{
IPv4: iap("100.64.0.1"),
IPv6: iap("fd7a:115c:a1e0::1"),
User: types.User{Name: "user1"},
User: users[1],
},
},
want: []tailcfg.FilterRule{
@ -2362,7 +2583,7 @@ func TestReduceFilterRules(t *testing.T) {
node: &types.Node{
IPv4: iap("100.64.0.100"),
IPv6: iap("fd7a:115c:a1e0::100"),
User: types.User{Name: "user100"},
User: users[3],
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: []netip.Prefix{netip.MustParsePrefix("172.16.0.0/24")},
},
@ -2372,7 +2593,7 @@ func TestReduceFilterRules(t *testing.T) {
&types.Node{
IPv4: iap("100.64.0.1"),
IPv6: iap("fd7a:115c:a1e0::1"),
User: types.User{Name: "user1"},
User: users[1],
},
},
want: []tailcfg.FilterRule{
@ -2400,6 +2621,7 @@ func TestReduceFilterRules(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, _ := tt.pol.CompileFilterRules(
users,
append(tt.peers, tt.node),
)
@ -3391,7 +3613,7 @@ func TestSSHRules(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := tt.pol.CompileSSHPolicy(&tt.node, tt.peers)
got, err := tt.pol.CompileSSHPolicy(&tt.node, []types.User{}, tt.peers)
assert.NoError(t, err)
if diff := cmp.Diff(tt.want, got); diff != "" {
@ -3474,14 +3696,17 @@ func TestValidExpandTagOwnersInSources(t *testing.T) {
RequestTags: []string{"tag:test"},
}
user := types.User{
Model: gorm.Model{ID: 1},
Name: "user1",
}
node := &types.Node{
ID: 0,
Hostname: "testnodes",
IPv4: iap("100.64.0.1"),
UserID: 0,
User: types.User{
Name: "user1",
},
User: user,
RegisterMethod: util.RegisterMethodAuthKey,
Hostinfo: &hostInfo,
}
@ -3498,7 +3723,7 @@ func TestValidExpandTagOwnersInSources(t *testing.T) {
},
}
got, _, err := GenerateFilterAndSSHRulesForTests(pol, node, types.Nodes{})
got, _, err := GenerateFilterAndSSHRulesForTests(pol, node, types.Nodes{}, []types.User{user})
assert.NoError(t, err)
want := []tailcfg.FilterRule{
@ -3532,6 +3757,7 @@ func TestInvalidTagValidUser(t *testing.T) {
IPv4: iap("100.64.0.1"),
UserID: 1,
User: types.User{
Model: gorm.Model{ID: 1},
Name: "user1",
},
RegisterMethod: util.RegisterMethodAuthKey,
@ -3549,7 +3775,7 @@ func TestInvalidTagValidUser(t *testing.T) {
},
}
got, _, err := GenerateFilterAndSSHRulesForTests(pol, node, types.Nodes{})
got, _, err := GenerateFilterAndSSHRulesForTests(pol, node, types.Nodes{}, []types.User{node.User})
assert.NoError(t, err)
want := []tailcfg.FilterRule{
@ -3583,6 +3809,7 @@ func TestValidExpandTagOwnersInDestinations(t *testing.T) {
IPv4: iap("100.64.0.1"),
UserID: 1,
User: types.User{
Model: gorm.Model{ID: 1},
Name: "user1",
},
RegisterMethod: util.RegisterMethodAuthKey,
@ -3608,7 +3835,7 @@ func TestValidExpandTagOwnersInDestinations(t *testing.T) {
// c.Assert(rules[0].DstPorts, check.HasLen, 1)
// c.Assert(rules[0].DstPorts[0].IP, check.Equals, "100.64.0.1/32")
got, _, err := GenerateFilterAndSSHRulesForTests(pol, node, types.Nodes{})
got, _, err := GenerateFilterAndSSHRulesForTests(pol, node, types.Nodes{}, []types.User{node.User})
assert.NoError(t, err)
want := []tailcfg.FilterRule{
@ -3637,15 +3864,17 @@ func TestValidTagInvalidUser(t *testing.T) {
Hostname: "webserver",
RequestTags: []string{"tag:webapp"},
}
user := types.User{
Model: gorm.Model{ID: 1},
Name: "user1",
}
node := &types.Node{
ID: 1,
Hostname: "webserver",
IPv4: iap("100.64.0.1"),
UserID: 1,
User: types.User{
Name: "user1",
},
User: user,
RegisterMethod: util.RegisterMethodAuthKey,
Hostinfo: &hostInfo,
}
@ -3660,9 +3889,7 @@ func TestValidTagInvalidUser(t *testing.T) {
Hostname: "user",
IPv4: iap("100.64.0.2"),
UserID: 1,
User: types.User{
Name: "user1",
},
User: user,
RegisterMethod: util.RegisterMethodAuthKey,
Hostinfo: &hostInfo2,
}
@ -3678,7 +3905,7 @@ func TestValidTagInvalidUser(t *testing.T) {
},
}
got, _, err := GenerateFilterAndSSHRulesForTests(pol, node, types.Nodes{nodes2})
got, _, err := GenerateFilterAndSSHRulesForTests(pol, node, types.Nodes{nodes2}, []types.User{user})
assert.NoError(t, err)
want := []tailcfg.FilterRule{


@ -183,3 +183,4 @@ nav:
- Integration:
- Reverse proxy: ref/integration/reverse-proxy.md
- Web UI: ref/integration/web-ui.md
- Tools: ref/integration/tools.md