Mirror of https://github.com/juanfont/headscale.git, synced 2024-11-30 02:43:05 +00:00

Compare commits: 4 commits, 50780fa10c ... b049a26cd9

- b049a26cd9
- e7245856c5
- 98a65c76d3
- 1ec99c55e4
6 changed files with 327 additions and 61 deletions

.github/workflows/test-integration.yaml (vendored)
```diff
@@ -60,6 +60,7 @@ jobs:
 - TestEnableDisableAutoApprovedRoute
 - TestAutoApprovedSubRoute2068
 - TestSubnetRouteACL
+- TestHASubnetRouterFailoverWhenNodeDisconnects2129
 - TestHeadscale
 - TestCreateTailscale
 - TestTailscaleNodesJoiningHeadcale
```
```diff
@@ -1,2 +1,3 @@
 .github/workflows/test-integration-v2*
 docs/about/features.md
+docs/ref/remote-cli.md
```
docs/ref/remote-cli.md

````diff
@@ -1,22 +1,21 @@
 # Controlling headscale with remote CLI
 
-This documentation has the goal of showing a user how-to set control a headscale instance
+This documentation has the goal of showing a user how-to control a headscale instance
 from a remote machine with the `headscale` command line binary.
 
 ## Prerequisite
 
-- A workstation to run headscale (could be Linux, macOS, other supported platforms)
-- A headscale server (version `0.13.0` or newer)
-- Access to create API keys (local access to the headscale server)
-- headscale _must_ be served over TLS/HTTPS
-- Remote access does _not_ support unencrypted traffic.
-- Port `50443` must be open in the firewall (or port overridden by `grpc_listen_addr` option)
+- A workstation to run `headscale` (any supported platform, e.g. Linux).
+- A headscale server with gRPC enabled.
+- Connections to the gRPC port (default: `50443`) are allowed.
+- Remote access requires an encrypted connection via TLS.
+- An API key to authenticate with the headscale server.
 
 ## Create an API key
 
-We need to create an API key to authenticate our remote headscale when using it from our workstation.
+We need to create an API key to authenticate with the remote headscale server when using it from our workstation.
 
-To create a API key, log into your headscale server and generate a key:
+To create an API key, log into your headscale server and generate a key:
 
 ```shell
 headscale apikeys create --expiration 90d
````
````diff
@@ -25,7 +24,7 @@ headscale apikeys create --expiration 90d
 Copy the output of the command and save it for later. Please note that you can not retrieve a key again,
 if the key is lost, expire the old one, and create a new key.
 
-To list the keys currently assosicated with the server:
+To list the keys currently associated with the server:
 
 ```shell
 headscale apikeys list
````
```diff
@@ -39,7 +38,8 @@ headscale apikeys expire --prefix "<PREFIX>"
 
 ## Download and configure headscale
 
-1. Download the latest [`headscale` binary from GitHub's release page](https://github.com/juanfont/headscale/releases):
+1. Download the [`headscale` binary from GitHub's release page](https://github.com/juanfont/headscale/releases). Make
+   sure to use the same version as on the server.
 
 1. Put the binary somewhere in your `PATH`, e.g. `/usr/local/bin/headscale`
 
```
````diff
@@ -49,25 +49,32 @@ headscale apikeys expire --prefix "<PREFIX>"
    chmod +x /usr/local/bin/headscale
    ```
 
-1. Configure the CLI through environment variables
+1. Provide the connection parameters for the remote headscale server either via a minimal YAML configuration file or via
+   environment variables:
 
-   ```shell
-   export HEADSCALE_CLI_ADDRESS="<HEADSCALE ADDRESS>:<PORT>"
-   export HEADSCALE_CLI_API_KEY="<API KEY FROM PREVIOUS STAGE>"
-   ```
+   === "Minimal YAML configuration file"
+
+       ```yaml
+       cli:
+         address: <HEADSCALE_ADDRESS>:<PORT>
+         api_key: <API_KEY_FROM_PREVIOUS_STEP>
+       ```
 
-   for example:
+   === "Environment variables"
 
-   ```shell
-   export HEADSCALE_CLI_ADDRESS="headscale.example.com:50443"
-   export HEADSCALE_CLI_API_KEY="abcde12345"
-   ```
+       ```shell
+       export HEADSCALE_CLI_ADDRESS="<HEADSCALE_ADDRESS>:<PORT>"
+       export HEADSCALE_CLI_API_KEY="<API_KEY_FROM_PREVIOUS_STEP>"
+       ```
 
-   This will tell the `headscale` binary to connect to a remote instance, instead of looking
-   for a local instance (which is what it does on the server).
+   !!! bug
 
-   The API key is needed to make sure that you are allowed to access the server. The key is _not_
-   needed when running directly on the server, as the connection is local.
+       Headscale 0.23.0 requires at least an empty configuration file when environment variables are used to
+       specify connection details. See [issue 2193](https://github.com/juanfont/headscale/issues/2193) for more
+       information.
+
+   This instructs the `headscale` binary to connect to a remote instance at `<HEADSCALE_ADDRESS>:<PORT>`, instead of
+   connecting to the local instance.
 
 1. Test the connection
````
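Both configuration styles resolve to the same two values: the server address and the API key. A minimal Go sketch of how such a resolver could work, assuming environment variables take precedence over the `cli:` section of the file (headscale's actual viper-based loader is more involved, and `resolveCLI` is a hypothetical helper):

```go
package main

import (
	"fmt"
	"os"
)

// resolveCLI sketches how the remote CLI's address and API key could be
// resolved. fileAddress and fileAPIKey stand in for values parsed from
// the cli: section of a YAML configuration file; environment variables,
// if set, override them.
func resolveCLI(fileAddress, fileAPIKey string) (address, apiKey string) {
	address, apiKey = fileAddress, fileAPIKey
	if v, ok := os.LookupEnv("HEADSCALE_CLI_ADDRESS"); ok {
		address = v
	}
	if v, ok := os.LookupEnv("HEADSCALE_CLI_API_KEY"); ok {
		apiKey = v
	}
	return address, apiKey
}

func main() {
	addr, key := resolveCLI("headscale.example.com:50443", "")
	fmt.Printf("remote CLI target %s (API key set: %t)\n", addr, key != "")
}
```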
```diff
@@ -89,10 +96,10 @@ While this is _not a supported_ feature, an example on how this can be set up on
 
 ## Troubleshooting
 
-Checklist:
-- Make sure you have the _same_ headscale version on your server and workstation
-- Make sure you use version `0.13.0` or newer.
-- Verify that your TLS certificate is valid and trusted
-- If you do not have access to a trusted certificate (e.g. from Let's Encrypt), add your self signed certificate to the trust store of your OS or
-- Set `HEADSCALE_CLI_INSECURE` to 0 in your environment
+- Make sure you have the _same_ headscale version on your server and workstation.
+- Ensure that connections to the gRPC port are allowed.
+- Verify that your TLS certificate is valid and trusted.
+- If you don't have access to a trusted certificate (e.g. from Let's Encrypt), either:
+  - Add your self-signed certificate to the trust store of your OS _or_
+  - Disable certificate verification by either setting `cli.insecure: true` in the configuration file or by setting
+    `HEADSCALE_CLI_INSECURE=1` via an environment variable. We do **not** recommend disabling certificate validation.
```
```diff
@@ -191,6 +191,7 @@ func (m *mapSession) serve() {
 //
 //nolint:gocyclo
 func (m *mapSession) serveLongPoll() {
+	start := time.Now()
 	m.beforeServeLongPoll()
 
 	// Clean up the session when the client disconnects
```
```diff
@@ -220,16 +221,6 @@ func (m *mapSession) serveLongPoll() {
 
 	m.pollFailoverRoutes("node connected", m.node)
 
-	// Upgrade the writer to a ResponseController
-	rc := http.NewResponseController(m.w)
-
-	// Longpolling will break if there is a write timeout,
-	// so it needs to be disabled.
-	rc.SetWriteDeadline(time.Time{})
-
-	ctx, cancel := context.WithCancel(context.WithValue(m.ctx, nodeNameContextKey, m.node.Hostname))
-	defer cancel()
-
 	m.keepAliveTicker = time.NewTicker(m.keepAlive)
 
 	m.h.nodeNotifier.AddNode(m.node.ID, m.ch)
```
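The deleted block used Go 1.20's `http.ResponseController` to lift the per-request write deadline, since a server-wide `WriteTimeout` would otherwise sever a long-lived poll. A standalone sketch of that (now removed) pattern, with an illustrative server and handler:

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

// longPoll sketches the removed pattern: clear the per-request write
// deadline so the server's global WriteTimeout cannot cut off a
// long-lived streaming response.
func longPoll(w http.ResponseWriter, r *http.Request) {
	rc := http.NewResponseController(w)
	// The zero time.Time means "no deadline".
	if err := rc.SetWriteDeadline(time.Time{}); err != nil {
		http.Error(w, "cannot clear write deadline", http.StatusInternalServerError)
		return
	}
	fmt.Fprintln(w, "holding connection open...")
	// ... keep writing updates for as long as the client stays connected ...
}

func main() {
	srv := &http.Server{
		Addr:         ":8080",
		WriteTimeout: 10 * time.Second, // would otherwise kill long polls
		Handler:      http.HandlerFunc(longPoll),
	}
	_ = srv.ListenAndServe()
}
```

The commit instead selects on `m.ctx` directly and flushes through a plain `http.Flusher`, as the later hunks show.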
```diff
@@ -243,12 +234,12 @@ func (m *mapSession) serveLongPoll() {
 		// consume channels with update, keep alives or "batch" blocking signals
 		select {
 		case <-m.cancelCh:
-			m.tracef("poll cancelled received")
+			m.tracef("poll cancelled received (%s)", time.Since(start).String())
 			mapResponseEnded.WithLabelValues("cancelled").Inc()
 			return
 
-		case <-ctx.Done():
-			m.tracef("poll context done")
+		case <-m.ctx.Done():
+			m.tracef("poll context done (%s): %s", time.Since(start).String(), m.ctx.Err().Error())
 			mapResponseEnded.WithLabelValues("done").Inc()
 			return
```
```diff
@@ -339,14 +330,7 @@ func (m *mapSession) serveLongPoll() {
 				m.errf(err, "could not write the map response(%s), for mapSession: %p", update.Type.String(), m)
 				return
 			}
-
-			err = rc.Flush()
-			if err != nil {
-				mapResponseSent.WithLabelValues("error", updateType).Inc()
-				m.errf(err, "flushing the map response to client, for mapSession: %p", m)
-				return
-			}
+			m.w.(http.Flusher).Flush()
 
 			log.Trace().Str("node", m.node.Hostname).TimeDiff("timeSpent", time.Now(), startWrite).Str("mkey", m.node.MachineKey.String()).Msg("finished writing mapresp to node")
 
 			if debugHighCardinalityMetrics {
```
```diff
@@ -360,22 +344,17 @@ func (m *mapSession) serveLongPoll() {
 		case <-m.keepAliveTicker.C:
 			data, err := m.mapper.KeepAliveResponse(m.req, m.node)
 			if err != nil {
-				m.errf(err, "Error generating the keep alive msg")
+				m.errf(err, "Error generating the keepalive msg")
 				mapResponseSent.WithLabelValues("error", "keepalive").Inc()
 				return
 			}
 			_, err = m.w.Write(data)
 			if err != nil {
-				m.errf(err, "Cannot write keep alive message")
-				mapResponseSent.WithLabelValues("error", "keepalive").Inc()
-				return
-			}
-			err = rc.Flush()
-			if err != nil {
-				m.errf(err, "flushing keep alive to client, for mapSession: %p", m)
+				m.errf(err, "Cannot write keepalive message")
 				mapResponseSent.WithLabelValues("error", "keepalive").Inc()
 				return
 			}
+			m.w.(http.Flusher).Flush()
 
 			if debugHighCardinalityMetrics {
 				mapResponseLastSentSeconds.WithLabelValues("keepalive", m.node.ID.String()).Set(float64(time.Now().Unix()))
```
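With the `ResponseController` gone, responses are flushed through a plain `http.Flusher` type assertion after each write. A self-contained sketch of that write-then-flush keepalive pattern (the route, interval, and payload are illustrative, not headscale's):

```go
package main

import (
	"net/http"
	"time"
)

// keepAlive sketches the pattern used above: write a payload and flush
// immediately so a long-polling client sees it right away, instead of
// waiting for the response body to complete.
func keepAlive(w http.ResponseWriter, r *http.Request) {
	flusher, ok := w.(http.Flusher)
	if !ok {
		http.Error(w, "streaming unsupported", http.StatusInternalServerError)
		return
	}

	ticker := time.NewTicker(30 * time.Second) // illustrative interval
	defer ticker.Stop()

	for {
		select {
		case <-r.Context().Done():
			return // client disconnected
		case <-ticker.C:
			if _, err := w.Write([]byte("keepalive\n")); err != nil {
				return
			}
			// http.Flusher.Flush returns no error, unlike rc.Flush(),
			// so failed writes are the only disconnect signal here.
			flusher.Flush()
		}
	}
}

func main() {
	http.HandleFunc("/poll", keepAlive)
	_ = http.ListenAndServe(":8080", nil)
}
```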
```diff
@@ -13,6 +13,7 @@ import (
 	"github.com/google/go-cmp/cmp/cmpopts"
 	v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
 	"github.com/juanfont/headscale/hscontrol/policy"
+	"github.com/juanfont/headscale/hscontrol/types"
 	"github.com/juanfont/headscale/hscontrol/util"
 	"github.com/juanfont/headscale/integration/hsic"
 	"github.com/juanfont/headscale/integration/tsic"
```
```diff
@@ -1316,3 +1317,252 @@ func TestSubnetRouteACL(t *testing.T) {
 		t.Errorf("Subnet (%s) filter, unexpected result (-want +got):\n%s", subRouter1.Hostname(), diff)
 	}
 }
+
+func TestHASubnetRouterFailoverWhenNodeDisconnects2129(t *testing.T) {
+	IntegrationSkip(t)
+	t.Parallel()
+
+	user := "enable-routing"
+
+	scenario, err := NewScenario(dockertestMaxWait())
+	assertNoErrf(t, "failed to create scenario: %s", err)
+	// defer scenario.ShutdownAssertNoPanics(t)
+
+	spec := map[string]int{
+		user: 3,
+	}
+
+	err = scenario.CreateHeadscaleEnv(spec,
+		[]tsic.Option{},
+		hsic.WithTestName("clientdisc"),
+		hsic.WithEmbeddedDERPServerOnly(),
+		hsic.WithTLS(),
+		hsic.WithHostnameAsServerURL(),
+		hsic.WithIPAllocationStrategy(types.IPAllocationStrategyRandom),
+	)
+	assertNoErrHeadscaleEnv(t, err)
+
+	allClients, err := scenario.ListTailscaleClients()
+	assertNoErrListClients(t, err)
+
+	err = scenario.WaitForTailscaleSync()
+	assertNoErrSync(t, err)
+
+	headscale, err := scenario.Headscale()
+	assertNoErrGetHeadscale(t, err)
+
+	expectedRoutes := map[string]string{
+		"1": "10.0.0.0/24",
+		"2": "10.0.0.0/24",
+	}
+
+	// Sort nodes by ID
+	sort.SliceStable(allClients, func(i, j int) bool {
+		statusI, err := allClients[i].Status()
+		if err != nil {
+			return false
+		}
+
+		statusJ, err := allClients[j].Status()
+		if err != nil {
+			return false
+		}
+
+		return statusI.Self.ID < statusJ.Self.ID
+	})
+
+	subRouter1 := allClients[0]
+	subRouter2 := allClients[1]
+
+	t.Logf("Advertise route from r1 (%s) and r2 (%s), making it HA, n1 is primary", subRouter1.Hostname(), subRouter2.Hostname())
+	// advertise HA route on node 1 and 2
+	// ID 1 will be primary
+	// ID 2 will be secondary
+	for _, client := range allClients[:2] {
+		status, err := client.Status()
+		assertNoErr(t, err)
+
+		if route, ok := expectedRoutes[string(status.Self.ID)]; ok {
+			command := []string{
+				"tailscale",
+				"set",
+				"--advertise-routes=" + route,
+			}
+			_, _, err = client.Execute(command)
+			assertNoErrf(t, "failed to advertise route: %s", err)
+		} else {
+			t.Fatalf("failed to find route for Node %s (id: %s)", status.Self.HostName, status.Self.ID)
+		}
+	}
+
+	err = scenario.WaitForTailscaleSync()
+	assertNoErrSync(t, err)
+
+	var routes []*v1.Route
+	err = executeAndUnmarshal(
+		headscale,
+		[]string{
+			"headscale",
+			"routes",
+			"list",
+			"--output",
+			"json",
+		},
+		&routes,
+	)
+
+	assertNoErr(t, err)
+	assert.Len(t, routes, 2)
+
+	t.Logf("initial routes %#v", routes)
+
+	for _, route := range routes {
+		assert.Equal(t, true, route.GetAdvertised())
+		assert.Equal(t, false, route.GetEnabled())
+		assert.Equal(t, false, route.GetIsPrimary())
+	}
+
+	// Verify that no routes have been sent to the client,
+	// they are not yet enabled.
+	for _, client := range allClients {
+		status, err := client.Status()
+		assertNoErr(t, err)
+
+		for _, peerKey := range status.Peers() {
+			peerStatus := status.Peer[peerKey]
+
+			assert.Nil(t, peerStatus.PrimaryRoutes)
+		}
+	}
+
+	// Enable all routes
+	for _, route := range routes {
+		_, err = headscale.Execute(
+			[]string{
+				"headscale",
+				"routes",
+				"enable",
+				"--route",
+				strconv.Itoa(int(route.GetId())),
+			})
+		assertNoErr(t, err)
+
+		time.Sleep(time.Second)
+	}
+
+	var enablingRoutes []*v1.Route
+	err = executeAndUnmarshal(
+		headscale,
+		[]string{
+			"headscale",
+			"routes",
+			"list",
+			"--output",
+			"json",
+		},
+		&enablingRoutes,
+	)
+	assertNoErr(t, err)
+	assert.Len(t, enablingRoutes, 2)
+
+	// Node 1 is primary
+	assert.Equal(t, true, enablingRoutes[0].GetAdvertised())
+	assert.Equal(t, true, enablingRoutes[0].GetEnabled())
+	assert.Equal(t, true, enablingRoutes[0].GetIsPrimary(), "both subnet routers are up, expected r1 to be primary")
+
+	// Node 2 is not primary
+	assert.Equal(t, true, enablingRoutes[1].GetAdvertised())
+	assert.Equal(t, true, enablingRoutes[1].GetEnabled())
+	assert.Equal(t, false, enablingRoutes[1].GetIsPrimary(), "both subnet routers are up, expected r2 to be non-primary")
+
+	var nodeList []v1.Node
+	err = executeAndUnmarshal(
+		headscale,
+		[]string{
+			"headscale",
+			"nodes",
+			"list",
+			"--output",
+			"json",
+		},
+		&nodeList,
+	)
+	assert.Nil(t, err)
+	assert.Len(t, nodeList, 3)
+	assert.True(t, nodeList[0].Online)
+	assert.True(t, nodeList[1].Online)
+	assert.True(t, nodeList[2].Online)
+
+	// Disconnect one of the docker containers from the network to simulate a disconnect
+	err = scenario.DisconnectContainersFromScenario(subRouter1.Hostname())
+	assertNoErr(t, err)
+
+	time.Sleep(5 * time.Second)
+
+	var nodeListAfterDisconnect []v1.Node
+	err = executeAndUnmarshal(
+		headscale,
+		[]string{
+			"headscale",
+			"nodes",
+			"list",
+			"--output",
+			"json",
+		},
+		&nodeListAfterDisconnect,
+	)
+	assert.Nil(t, err)
+	assert.Len(t, nodeListAfterDisconnect, 3)
+	assert.False(t, nodeListAfterDisconnect[0].Online)
+	assert.True(t, nodeListAfterDisconnect[1].Online)
+	assert.True(t, nodeListAfterDisconnect[2].Online)
+
+	var routesAfterDisconnect []*v1.Route
+	err = executeAndUnmarshal(
+		headscale,
+		[]string{
+			"headscale",
+			"routes",
+			"list",
+			"--output",
+			"json",
+		},
+		&routesAfterDisconnect,
+	)
+	assertNoErr(t, err)
+	assert.Len(t, routesAfterDisconnect, 2)
+
+	// Node 1 is no longer primary
+	assert.Equal(t, true, routesAfterDisconnect[0].GetAdvertised())
+	assert.Equal(t, true, routesAfterDisconnect[0].GetEnabled())
+	assert.Equal(t, false, routesAfterDisconnect[0].GetIsPrimary(), "r1 is disconnected, expected r1 to be non-primary")
+
+	// Node 2 is now primary
+	assert.Equal(t, true, routesAfterDisconnect[1].GetAdvertised())
+	assert.Equal(t, true, routesAfterDisconnect[1].GetEnabled())
+	assert.Equal(t, true, routesAfterDisconnect[1].GetIsPrimary(), "r1 is disconnected, expected r2 to be primary")
+
+	// // Ensure the node can reconnect as expected
+	// err = scenario.ConnectContainersToScenario(subRouter1.Hostname())
+	// assertNoErr(t, err)
+
+	// time.Sleep(5 * time.Second)
+
+	// var nodeListAfterReconnect []v1.Node
+	// err = executeAndUnmarshal(
+	// 	headscale,
+	// 	[]string{
+	// 		"headscale",
+	// 		"nodes",
+	// 		"list",
+	// 		"--output",
+	// 		"json",
+	// 	},
+	// 	&nodeListAfterReconnect,
+	// )
+	// assert.Nil(t, err)
+	// assert.Len(t, nodeListAfterReconnect, 3)
+	// assert.True(t, nodeListAfterReconnect[0].Online)
+	// assert.True(t, nodeListAfterReconnect[1].Online)
+	// assert.True(t, nodeListAfterReconnect[2].Online)
+}
```
```diff
@@ -651,3 +651,31 @@ func (s *Scenario) WaitForTailscaleLogout() error {
 
 	return nil
 }
+
+// DisconnectContainersFromScenario disconnects a list of containers from the network.
+func (s *Scenario) DisconnectContainersFromScenario(containers ...string) error {
+	for _, container := range containers {
+		if ctr, ok := s.pool.ContainerByName(container); ok {
+			err := ctr.DisconnectFromNetwork(s.network)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+// ConnectContainersToScenario connects a list of containers to the network.
+func (s *Scenario) ConnectContainersToScenario(containers ...string) error {
+	for _, container := range containers {
+		if ctr, ok := s.pool.ContainerByName(container); ok {
+			err := ctr.ConnectToNetwork(s.network)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
```
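A sketch of how the two helpers pair up in a test, mirroring the commented-out reconnect block in the new integration test above. Everything here assumes the surrounding integration package (`NewScenario`, `assertNoErr`, etc.), and the container name is made up:

```go
// TestPartitionAndHeal is a hypothetical companion test: it cuts one
// container off from the docker network and later reattaches it.
func TestPartitionAndHeal(t *testing.T) {
	IntegrationSkip(t)

	scenario, err := NewScenario(dockertestMaxWait())
	assertNoErr(t, err)
	defer scenario.ShutdownAssertNoPanics(t)

	// ... create users and clients as in the test above ...

	err = scenario.DisconnectContainersFromScenario("ts-client-1") // hypothetical container name
	assertNoErr(t, err)

	time.Sleep(5 * time.Second) // give headscale time to mark the node offline

	err = scenario.ConnectContainersToScenario("ts-client-1")
	assertNoErr(t, err)
}
```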