Compare commits

...

6 commits

Author SHA1 Message Date
Kristoffer Dalby
55ff40108c
Merge 98a65c76d3 into 6275399327 2024-11-19 08:24:50 -06:00
Nathan Sweet
6275399327
Update tls.md to mention using the full cert chain (#2243)
2024-11-18 06:12:12 +00:00
nblock
29119bb7f4
Misc doc fixes (#2240)
* Link back to node registration docs
* adjust wording in apple docs
* Mention client specific page to check if headscale works

Ref: #2238
2024-11-18 05:46:58 +01:00
github-actions[bot]
93ba21ede5
flake.lock: Update (#2239)
2024-11-17 19:38:50 +00:00
Kristoffer Dalby
98a65c76d3
flesh out tests
Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
2024-09-13 14:23:41 +01:00
Kristoffer Dalby
1ec99c55e4
add test to reproduce #2129
Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
2024-09-13 07:22:24 +02:00
9 changed files with 303 additions and 41 deletions

View file

@@ -60,6 +60,7 @@ jobs:
 - TestEnableDisableAutoApprovedRoute
 - TestAutoApprovedSubRoute2068
 - TestSubnetRouteACL
+- TestHASubnetRouterFailoverWhenNodeDisconnects2129
 - TestHeadscale
 - TestCreateTailscale
 - TestTailscaleNodesJoiningHeadcale

View file

@@ -45,11 +45,11 @@ headscale server.
 ACLs have to be written in [huJSON](https://github.com/tailscale/hujson).
 
-When registering the servers we will need to add the flag
-`--advertise-tags=tag:<tag1>,tag:<tag2>`, and the user that is
-registering the server should be allowed to do it. Since anyone can add tags to
-a server they can register, the check of the tags is done on headscale server
-and only valid tags are applied. A tag is valid if the user that is
+When [registering the servers](../usage/getting-started.md#register-a-node) we
+will need to add the flag `--advertise-tags=tag:<tag1>,tag:<tag2>`, and the user
+that is registering the server should be allowed to do it. Since anyone can add
+tags to a server they can register, the check of the tags is done on headscale
+server and only valid tags are applied. A tag is valid if the user that is
 registering it is allowed to do it.
 
 To use ACLs in headscale, you must edit your `config.yaml` file. In there you will find a `policy.path` parameter. This will need to point to your ACL file. More info on how these policies are written can be found [here](https://tailscale.com/kb/1018/acls/).
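The reworded paragraph pairs naturally with a concrete invocation. A minimal sketch of the client-side registration it describes, using a placeholder server URL and tag name (neither value appears in this diff):

```
tailscale up --login-server https://headscale.example.com --advertise-tags=tag:webserver
```

As the paragraph notes, headscale then validates `tag:webserver` against the policy before applying it.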

View file

@@ -9,6 +9,8 @@ tls_cert_path: ""
 tls_key_path: ""
 ```
+
+The certificate should contain the full chain, else some clients, like the Tailscale Android client, will reject it.
 
 ## Let's Encrypt / ACME
 To get a certificate automatically via [Let's Encrypt](https://letsencrypt.org/), set `tls_letsencrypt_hostname` to the desired certificate hostname. This name must resolve to the IP address(es) headscale is reachable on (i.e., it must correspond to the `server_url` configuration parameter). The certificate and Let's Encrypt account credentials will be stored in the directory configured in `tls_letsencrypt_cache_dir`. If the path is relative, it will be interpreted as relative to the directory the configuration file was read from.
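The added sentence about the full chain can be verified from outside. A hedged check, assuming headscale is already serving TLS on port 443 at the example hostname: `openssl s_client` prints every certificate the server presents, so seeing only the leaf certificate in the output suggests the chain is incomplete.

```
openssl s_client -connect headscale.example.com:443 -showcerts </dev/null
```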

View file

@@ -60,7 +60,7 @@ Install the official Tailscale tvOS client from the [App Store](https://apps.app
 ### Configuring the headscale URL
 
-- Go Settings (the apple tvOS settings) > Apps > Tailscale
+- Open Settings (the Apple tvOS settings) > Apps > Tailscale
 - Under `ALTERNATE COORDINATION SERVER URL`, select `URL`
 - Enter the URL of your headscale instance (e.g `https://headscale.example.com`) and press `OK`
 - Return to the tvOS Home screen

View file

@@ -9,6 +9,8 @@ This page helps you get started with headscale and provides a few usage examples
   installation instructions.
 * The configuration file exists and is adjusted to suit your environment, see
   [Configuration](../ref/configuration.md) for details.
+* Headscale is reachable from the Internet. Verify this by opening client specific setup instructions in your
+  browser, e.g. https://headscale.example.com/windows
 * The Tailscale client is installed, see [Client and operating system support](../about/clients.md) for more
   information.
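The new prerequisite bullet can also be checked non-interactively; a sketch using the example hostname from the diff (any 2xx response indicates the instance is reachable):

```
curl -sSf -I https://headscale.example.com/windows
```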

View file

@@ -5,11 +5,11 @@
       "systems": "systems"
     },
     "locked": {
-      "lastModified": 1726560853,
-      "narHash": "sha256-X6rJYSESBVr3hBoH0WbKE5KvhPU5bloyZ2L4K60/fPQ=",
+      "lastModified": 1731533236,
+      "narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
       "owner": "numtide",
       "repo": "flake-utils",
-      "rev": "c1dfcf08411b08f6b8615f7d8971a2bfa81d5e8a",
+      "rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
       "type": "github"
     },
     "original": {
@@ -20,11 +20,11 @@
     },
     "nixpkgs": {
       "locked": {
-        "lastModified": 1730958623,
-        "narHash": "sha256-JwQZIGSYnRNOgDDoIgqKITrPVil+RMWHsZH1eE1VGN0=",
+        "lastModified": 1731763621,
+        "narHash": "sha256-ddcX4lQL0X05AYkrkV2LMFgGdRvgap7Ho8kgon3iWZk=",
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "85f7e662eda4fa3a995556527c87b2524b691933",
+        "rev": "c69a9bffbecde46b4b939465422ddc59493d3e4d",
         "type": "github"
       },
       "original": {

View file

@@ -191,6 +191,7 @@ func (m *mapSession) serve() {
 //
 //nolint:gocyclo
 func (m *mapSession) serveLongPoll() {
+	start := time.Now()
 	m.beforeServeLongPoll()
 
 	// Clean up the session when the client disconnects
@@ -220,16 +221,6 @@
 	m.pollFailoverRoutes("node connected", m.node)
 
-	// Upgrade the writer to a ResponseController
-	rc := http.NewResponseController(m.w)
-
-	// Longpolling will break if there is a write timeout,
-	// so it needs to be disabled.
-	rc.SetWriteDeadline(time.Time{})
-
-	ctx, cancel := context.WithCancel(context.WithValue(m.ctx, nodeNameContextKey, m.node.Hostname))
-	defer cancel()
-
 	m.keepAliveTicker = time.NewTicker(m.keepAlive)
 
 	m.h.nodeNotifier.AddNode(m.node.ID, m.ch)
@@ -243,12 +234,12 @@
 		// consume channels with update, keep alives or "batch" blocking signals
 		select {
 		case <-m.cancelCh:
-			m.tracef("poll cancelled received")
+			m.tracef("poll cancelled received (%s)", time.Since(start).String())
 			mapResponseEnded.WithLabelValues("cancelled").Inc()
 			return
 
-		case <-ctx.Done():
-			m.tracef("poll context done")
+		case <-m.ctx.Done():
+			m.tracef("poll context done (%s): %s", time.Since(start).String(), m.ctx.Err().Error())
 			mapResponseEnded.WithLabelValues("done").Inc()
 			return
@@ -339,14 +330,7 @@
 				m.errf(err, "could not write the map response(%s), for mapSession: %p", update.Type.String(), m)
 				return
 			}
-
-			err = rc.Flush()
-			if err != nil {
-				mapResponseSent.WithLabelValues("error", updateType).Inc()
-				m.errf(err, "flushing the map response to client, for mapSession: %p", m)
-				return
-			}
+			m.w.(http.Flusher).Flush()
 
 			log.Trace().Str("node", m.node.Hostname).TimeDiff("timeSpent", time.Now(), startWrite).Str("mkey", m.node.MachineKey.String()).Msg("finished writing mapresp to node")
 
 			if debugHighCardinalityMetrics {
@@ -360,22 +344,17 @@
 		case <-m.keepAliveTicker.C:
 			data, err := m.mapper.KeepAliveResponse(m.req, m.node)
 			if err != nil {
-				m.errf(err, "Error generating the keep alive msg")
+				m.errf(err, "Error generating the keepalive msg")
 				mapResponseSent.WithLabelValues("error", "keepalive").Inc()
 				return
 			}
 			_, err = m.w.Write(data)
 			if err != nil {
-				m.errf(err, "Cannot write keep alive message")
-				mapResponseSent.WithLabelValues("error", "keepalive").Inc()
-				return
-			}
-
-			err = rc.Flush()
-			if err != nil {
-				m.errf(err, "flushing keep alive to client, for mapSession: %p", m)
+				m.errf(err, "Cannot write keepalive message")
 				mapResponseSent.WithLabelValues("error", "keepalive").Inc()
 				return
 			}
+			m.w.(http.Flusher).Flush()
 
 			if debugHighCardinalityMetrics {
 				mapResponseLastSentSeconds.WithLabelValues("keepalive", m.node.ID.String()).Set(float64(time.Now().Unix()))
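This poll.go change drops the `http.ResponseController` (along with its write-deadline reset and the derived context) in favor of flushing through a plain `http.Flusher` type assertion, and threads a `start` timestamp into the trace logs. A minimal standalone sketch of the resulting flush pattern, assuming a toy handler and payload (this is not headscale's actual handler):

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

// longPoll streams periodic messages, flushing after each write so the
// client sees data immediately instead of waiting for the buffer to fill.
func longPoll(w http.ResponseWriter, r *http.Request) {
	flusher, ok := w.(http.Flusher)
	if !ok {
		http.Error(w, "streaming unsupported", http.StatusInternalServerError)
		return
	}

	for i := 0; i < 3; i++ {
		fmt.Fprintf(w, "keepalive %d\n", i)
		flusher.Flush() // same pattern as m.w.(http.Flusher).Flush() in the diff
		time.Sleep(time.Second)
	}
}

func main() {
	http.HandleFunc("/poll", longPoll)
	http.ListenAndServe(":8080", nil)
}
```

Unlike `ResponseController.Flush`, the `http.Flusher` interface returns no error, which is why the error-handling blocks around the old `rc.Flush()` calls disappear in this diff.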

View file

@@ -13,6 +13,7 @@ import (
 	"github.com/google/go-cmp/cmp/cmpopts"
 	v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
 	"github.com/juanfont/headscale/hscontrol/policy"
+	"github.com/juanfont/headscale/hscontrol/types"
 	"github.com/juanfont/headscale/hscontrol/util"
 	"github.com/juanfont/headscale/integration/hsic"
 	"github.com/juanfont/headscale/integration/tsic"
@@ -1316,3 +1317,252 @@ func TestSubnetRouteACL(t *testing.T) {
 		t.Errorf("Subnet (%s) filter, unexpected result (-want +got):\n%s", subRouter1.Hostname(), diff)
 	}
 }
+
+func TestHASubnetRouterFailoverWhenNodeDisconnects2129(t *testing.T) {
+	IntegrationSkip(t)
+	t.Parallel()
+
+	user := "enable-routing"
+
+	scenario, err := NewScenario(dockertestMaxWait())
+	assertNoErrf(t, "failed to create scenario: %s", err)
+	// defer scenario.ShutdownAssertNoPanics(t)
+
+	spec := map[string]int{
+		user: 3,
+	}
+
+	err = scenario.CreateHeadscaleEnv(spec,
+		[]tsic.Option{},
+		hsic.WithTestName("clientdisc"),
+		hsic.WithEmbeddedDERPServerOnly(),
+		hsic.WithTLS(),
+		hsic.WithHostnameAsServerURL(),
+		hsic.WithIPAllocationStrategy(types.IPAllocationStrategyRandom),
+	)
+	assertNoErrHeadscaleEnv(t, err)
+
+	allClients, err := scenario.ListTailscaleClients()
+	assertNoErrListClients(t, err)
+
+	err = scenario.WaitForTailscaleSync()
+	assertNoErrSync(t, err)
+
+	headscale, err := scenario.Headscale()
+	assertNoErrGetHeadscale(t, err)
+
+	expectedRoutes := map[string]string{
+		"1": "10.0.0.0/24",
+		"2": "10.0.0.0/24",
+	}
+
+	// Sort nodes by ID
+	sort.SliceStable(allClients, func(i, j int) bool {
+		statusI, err := allClients[i].Status()
+		if err != nil {
+			return false
+		}
+
+		statusJ, err := allClients[j].Status()
+		if err != nil {
+			return false
+		}
+
+		return statusI.Self.ID < statusJ.Self.ID
+	})
+
+	subRouter1 := allClients[0]
+	subRouter2 := allClients[1]
+
+	t.Logf("Advertise route from r1 (%s) and r2 (%s), making it HA, n1 is primary", subRouter1.Hostname(), subRouter2.Hostname())
+	// advertise HA route on node 1 and 2
+	// ID 1 will be primary
+	// ID 2 will be secondary
+	for _, client := range allClients[:2] {
+		status, err := client.Status()
+		assertNoErr(t, err)
+
+		if route, ok := expectedRoutes[string(status.Self.ID)]; ok {
+			command := []string{
+				"tailscale",
+				"set",
+				"--advertise-routes=" + route,
+			}
+			_, _, err = client.Execute(command)
+			assertNoErrf(t, "failed to advertise route: %s", err)
+		} else {
+			t.Fatalf("failed to find route for Node %s (id: %s)", status.Self.HostName, status.Self.ID)
+		}
+	}
+
+	err = scenario.WaitForTailscaleSync()
+	assertNoErrSync(t, err)
+
+	var routes []*v1.Route
+	err = executeAndUnmarshal(
+		headscale,
+		[]string{
+			"headscale",
+			"routes",
+			"list",
+			"--output",
+			"json",
+		},
+		&routes,
+	)
+	assertNoErr(t, err)
+	assert.Len(t, routes, 2)
+
+	t.Logf("initial routes %#v", routes)
+
+	for _, route := range routes {
+		assert.Equal(t, true, route.GetAdvertised())
+		assert.Equal(t, false, route.GetEnabled())
+		assert.Equal(t, false, route.GetIsPrimary())
+	}
+
+	// Verify that no routes has been sent to the client,
+	// they are not yet enabled.
+	for _, client := range allClients {
+		status, err := client.Status()
+		assertNoErr(t, err)
+
+		for _, peerKey := range status.Peers() {
+			peerStatus := status.Peer[peerKey]
+
+			assert.Nil(t, peerStatus.PrimaryRoutes)
+		}
+	}
+
+	// Enable all routes
+	for _, route := range routes {
+		_, err = headscale.Execute(
+			[]string{
+				"headscale",
+				"routes",
+				"enable",
+				"--route",
+				strconv.Itoa(int(route.GetId())),
+			})
+		assertNoErr(t, err)
+
+		time.Sleep(time.Second)
+	}
+
+	var enablingRoutes []*v1.Route
+	err = executeAndUnmarshal(
+		headscale,
+		[]string{
+			"headscale",
+			"routes",
+			"list",
+			"--output",
+			"json",
+		},
+		&enablingRoutes,
+	)
+	assertNoErr(t, err)
+	assert.Len(t, enablingRoutes, 2)
+
+	// Node 1 is primary
+	assert.Equal(t, true, enablingRoutes[0].GetAdvertised())
+	assert.Equal(t, true, enablingRoutes[0].GetEnabled())
+	assert.Equal(t, true, enablingRoutes[0].GetIsPrimary(), "both subnet routers are up, expected r1 to be primary")
+
+	// Node 2 is not primary
+	assert.Equal(t, true, enablingRoutes[1].GetAdvertised())
+	assert.Equal(t, true, enablingRoutes[1].GetEnabled())
+	assert.Equal(t, false, enablingRoutes[1].GetIsPrimary(), "both subnet routers are up, expected r2 to be non-primary")
+
+	var nodeList []v1.Node
+	err = executeAndUnmarshal(
+		headscale,
+		[]string{
+			"headscale",
+			"nodes",
+			"list",
+			"--output",
+			"json",
+		},
+		&nodeList,
+	)
+	assert.Nil(t, err)
+	assert.Len(t, nodeList, 3)
+	assert.True(t, nodeList[0].Online)
+	assert.True(t, nodeList[1].Online)
+	assert.True(t, nodeList[2].Online)
+
+	// Kill off one of the docker containers to simulate a disconnect
+	err = scenario.DisconnectContainersFromScenario(subRouter1.Hostname())
+	assertNoErr(t, err)
+
+	time.Sleep(5 * time.Second)
+
+	var nodeListAfterDisconnect []v1.Node
+	err = executeAndUnmarshal(
+		headscale,
+		[]string{
+			"headscale",
+			"nodes",
+			"list",
+			"--output",
+			"json",
+		},
+		&nodeListAfterDisconnect,
+	)
+	assert.Nil(t, err)
+	assert.Len(t, nodeListAfterDisconnect, 3)
+	assert.False(t, nodeListAfterDisconnect[0].Online)
+	assert.True(t, nodeListAfterDisconnect[1].Online)
+	assert.True(t, nodeListAfterDisconnect[2].Online)
+
+	var routesAfterDisconnect []*v1.Route
+	err = executeAndUnmarshal(
+		headscale,
+		[]string{
+			"headscale",
+			"routes",
+			"list",
+			"--output",
+			"json",
+		},
+		&routesAfterDisconnect,
+	)
+	assertNoErr(t, err)
+	assert.Len(t, routesAfterDisconnect, 2)
+
+	// Node 1 is no longer primary
+	assert.Equal(t, true, routesAfterDisconnect[0].GetAdvertised())
+	assert.Equal(t, true, routesAfterDisconnect[0].GetEnabled())
+	assert.Equal(t, false, routesAfterDisconnect[0].GetIsPrimary(), "r1 has disconnected, expected r1 to be non-primary")
+
+	// Node 2 is now primary
+	assert.Equal(t, true, routesAfterDisconnect[1].GetAdvertised())
+	assert.Equal(t, true, routesAfterDisconnect[1].GetEnabled())
+	assert.Equal(t, true, routesAfterDisconnect[1].GetIsPrimary(), "r1 has disconnected, expected r2 to be primary")
+
+	// // Ensure the node can reconnect as expected
+	// err = scenario.ConnectContainersToScenario(subRouter1.Hostname())
+	// assertNoErr(t, err)
+
+	// time.Sleep(5 * time.Second)
+
+	// var nodeListAfterReconnect []v1.Node
+	// err = executeAndUnmarshal(
+	// 	headscale,
+	// 	[]string{
+	// 		"headscale",
+	// 		"nodes",
+	// 		"list",
+	// 		"--output",
+	// 		"json",
+	// 	},
+	// 	&nodeListAfterReconnect,
+	// )
+	// assert.Nil(t, err)
+	// assert.Len(t, nodeListAfterReconnect, 3)
+	// assert.True(t, nodeListAfterReconnect[0].Online)
+	// assert.True(t, nodeListAfterReconnect[1].Online)
+	// assert.True(t, nodeListAfterReconnect[2].Online)
+}
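The test repeatedly shells out to the headscale CLI and decodes its JSON output via the repository's `executeAndUnmarshal` helper. A self-contained sketch of that pattern for readers outside the test harness; the struct fields here are illustrative guesses, and the real shape comes from the generated `v1.Route` type:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os/exec"
)

// route mirrors a subset of the fields the test asserts on; the JSON tag
// names are assumptions for this sketch, not the generated protobuf schema.
type route struct {
	ID        uint64 `json:"id"`
	Enabled   bool   `json:"enabled"`
	IsPrimary bool   `json:"is_primary"`
}

func listRoutes() ([]route, error) {
	// Equivalent of the repeated CLI call in the test above.
	out, err := exec.Command("headscale", "routes", "list", "--output", "json").Output()
	if err != nil {
		return nil, fmt.Errorf("headscale routes list: %w", err)
	}

	var routes []route
	if err := json.Unmarshal(out, &routes); err != nil {
		return nil, err
	}

	return routes, nil
}

func main() {
	routes, err := listRoutes()
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("%+v\n", routes)
}
```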

View file

@@ -651,3 +651,31 @@ func (s *Scenario) WaitForTailscaleLogout() error {
 	return nil
 }
+
+// DisconnectContainersFromScenario disconnects a list of containers from the network.
+func (s *Scenario) DisconnectContainersFromScenario(containers ...string) error {
+	for _, container := range containers {
+		if ctr, ok := s.pool.ContainerByName(container); ok {
+			err := ctr.DisconnectFromNetwork(s.network)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+// ConnectContainersToScenario connects a list of containers to the network.
+func (s *Scenario) ConnectContainersToScenario(containers ...string) error {
+	for _, container := range containers {
+		if ctr, ok := s.pool.ContainerByName(container); ok {
+			err := ctr.ConnectToNetwork(s.network)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
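For completeness, a hedged usage fragment of the two new helpers, mirroring the disconnect step in the test above. It assumes the `scenario` and `subRouter1` variables from that test and is not code from this PR:

```go
// Drop the container off the docker network to simulate an abrupt disconnect.
if err := scenario.DisconnectContainersFromScenario(subRouter1.Hostname()); err != nil {
	t.Fatal(err)
}

time.Sleep(5 * time.Second) // the test's own heuristic for headscale to notice

// Re-attach the container so the node can come back online.
if err := scenario.ConnectContainersToScenario(subRouter1.Hostname()); err != nil {
	t.Fatal(err)
}
```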