mirror of https://github.com/juanfont/headscale.git

commit 98a65c76d3 (parent 1ec99c55e4)
3 changed files with 74 additions and 32 deletions

    flesh out tests

    Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
@@ -191,6 +191,7 @@ func (m *mapSession) serve() {
 //
 //nolint:gocyclo
 func (m *mapSession) serveLongPoll() {
+	start := time.Now()
 	m.beforeServeLongPoll()
 
 	// Clean up the session when the client disconnects
@@ -235,16 +236,6 @@ func (m *mapSession) serveLongPoll() {
 
 	m.pollFailoverRoutes("node connected", m.node)
 
-	// Upgrade the writer to a ResponseController
-	rc := http.NewResponseController(m.w)
-
-	// Longpolling will break if there is a write timeout,
-	// so it needs to be disabled.
-	rc.SetWriteDeadline(time.Time{})
-
-	ctx, cancel := context.WithCancel(context.WithValue(m.ctx, nodeNameContextKey, m.node.Hostname))
-	defer cancel()
-
 	m.keepAliveTicker = time.NewTicker(m.keepAlive)
 
 	m.h.nodeNotifier.AddNode(m.node.ID, m.ch)
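Note: the block removed above used http.ResponseController (Go 1.20+) to clear the per-response write deadline, since a long-poll response would otherwise be cut off by the server's WriteTimeout; the commit moves to a plain http.Flusher assertion instead. A minimal sketch of the two styles outside headscale (the handler name and payload are illustrative, not headscale's code):

    package main

    import (
    	"net/http"
    	"time"
    )

    func streamHandler(w http.ResponseWriter, r *http.Request) {
    	// Old style: ResponseController can clear the write deadline
    	// (a zero time.Time disables it) and its Flush returns an error.
    	rc := http.NewResponseController(w)
    	_ = rc.SetWriteDeadline(time.Time{})

    	// New style: assert http.Flusher directly. Flush has no error
    	// return, so write failures must be caught on Write itself.
    	if f, ok := w.(http.Flusher); ok {
    		if _, err := w.Write([]byte("data\n")); err != nil {
    			return
    		}
    		f.Flush()
    	}
    }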
@@ -258,12 +249,12 @@ func (m *mapSession) serveLongPoll() {
 		// consume channels with update, keep alives or "batch" blocking signals
 		select {
 		case <-m.cancelCh:
-			m.tracef("poll cancelled received")
+			m.tracef("poll cancelled received (%s)", time.Since(start).String())
 			mapResponseEnded.WithLabelValues("cancelled").Inc()
 			return
 
-		case <-ctx.Done():
-			m.tracef("poll context done")
+		case <-m.ctx.Done():
+			m.tracef("poll context done (%s): %s", time.Since(start).String(), m.ctx.Err().Error())
 			mapResponseEnded.WithLabelValues("done").Inc()
 			return
 
@@ -354,14 +345,7 @@ func (m *mapSession) serveLongPoll() {
 				m.errf(err, "could not write the map response(%s), for mapSession: %p", update.Type.String(), m)
 				return
 			}
 
-			err = rc.Flush()
-			if err != nil {
-				mapResponseSent.WithLabelValues("error", updateType).Inc()
-				m.errf(err, "flushing the map response to client, for mapSession: %p", m)
-				return
-			}
-
+			m.w.(http.Flusher).Flush()
 
 			log.Trace().Str("node", m.node.Hostname).TimeDiff("timeSpent", time.Now(), startWrite).Str("mkey", m.node.MachineKey.String()).Msg("finished writing mapresp to node")
 
 			if debugHighCardinalityMetrics {
@@ -375,22 +359,17 @@ func (m *mapSession) serveLongPoll() {
 		case <-m.keepAliveTicker.C:
 			data, err := m.mapper.KeepAliveResponse(m.req, m.node)
 			if err != nil {
-				m.errf(err, "Error generating the keep alive msg")
+				m.errf(err, "Error generating the keepalive msg")
 				mapResponseSent.WithLabelValues("error", "keepalive").Inc()
 				return
 			}
 			_, err = m.w.Write(data)
 			if err != nil {
-				m.errf(err, "Cannot write keep alive message")
-				mapResponseSent.WithLabelValues("error", "keepalive").Inc()
-				return
-			}
-			err = rc.Flush()
-			if err != nil {
-				m.errf(err, "flushing keep alive to client, for mapSession: %p", m)
+				m.errf(err, "Cannot write keepalive message")
 				mapResponseSent.WithLabelValues("error", "keepalive").Inc()
 				return
 			}
+			m.w.(http.Flusher).Flush()
 
 			if debugHighCardinalityMetrics {
 				mapResponseLastSentSeconds.WithLabelValues("keepalive", m.node.ID.String()).Set(float64(time.Now().Unix()))
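Note: the keepalive branch above follows the same write-then-flush pattern on a time.Ticker. A self-contained sketch of such a loop (the interval and payload are assumptions, not headscale's wiring):

    package main

    import (
    	"net/http"
    	"time"
    )

    // keepAliveLoop is illustrative: on each tick it writes a tiny
    // keepalive payload and flushes so the bytes reach the client now.
    func keepAliveLoop(w http.ResponseWriter, r *http.Request, interval time.Duration) {
    	flusher, ok := w.(http.Flusher)
    	if !ok {
    		http.Error(w, "streaming unsupported", http.StatusInternalServerError)
    		return
    	}

    	ticker := time.NewTicker(interval)
    	defer ticker.Stop()

    	for {
    		select {
    		case <-r.Context().Done():
    			return // client disconnected
    		case <-ticker.C:
    			if _, err := w.Write([]byte{' '}); err != nil {
    				return // write failed; end the session
    			}
    			flusher.Flush()
    		}
    	}
    }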
@@ -1492,7 +1492,7 @@ func TestHASubnetRouterFailoverWhenNodeDisconnects2129(t *testing.T) {
 	assert.True(t, nodeList[2].Online)
 
 	// Kill off one of the docker containers to simulate a disconnect
-	err = scenario.DisconnectContainers(subRouter1.Hostname())
+	err = scenario.DisconnectContainersFromScenario(subRouter1.Hostname())
 	assertNoErr(t, err)
 
 	time.Sleep(5 * time.Second)
@@ -1514,4 +1514,53 @@ func TestHASubnetRouterFailoverWhenNodeDisconnects2129(t *testing.T) {
 	assert.False(t, nodeListAfterDisconnect[0].Online)
 	assert.True(t, nodeListAfterDisconnect[1].Online)
 	assert.True(t, nodeListAfterDisconnect[2].Online)
+
+	var routesAfterDisconnect []*v1.Route
+	err = executeAndUnmarshal(
+		headscale,
+		[]string{
+			"headscale",
+			"routes",
+			"list",
+			"--output",
+			"json",
+		},
+		&routesAfterDisconnect,
+	)
+	assertNoErr(t, err)
+	assert.Len(t, routesAfterDisconnect, 2)
+
+	// Node 1 is no longer primary
+	assert.Equal(t, true, routesAfterDisconnect[0].GetAdvertised())
+	assert.Equal(t, true, routesAfterDisconnect[0].GetEnabled())
+	assert.Equal(t, false, routesAfterDisconnect[0].GetIsPrimary(), "r1 is disconnected, expected r1 to be non-primary")
+
+	// Node 2 is now primary
+	assert.Equal(t, true, routesAfterDisconnect[1].GetAdvertised())
+	assert.Equal(t, true, routesAfterDisconnect[1].GetEnabled())
+	assert.Equal(t, true, routesAfterDisconnect[1].GetIsPrimary(), "r1 is disconnected, expected r2 to take over as primary")
+
+	// // Ensure the node can reconnect as expected
+	// err = scenario.ConnectContainersToScenario(subRouter1.Hostname())
+	// assertNoErr(t, err)
+
+	// time.Sleep(5 * time.Second)
+
+	// var nodeListAfterReconnect []v1.Node
+	// err = executeAndUnmarshal(
+	// 	headscale,
+	// 	[]string{
+	// 		"headscale",
+	// 		"nodes",
+	// 		"list",
+	// 		"--output",
+	// 		"json",
+	// 	},
+	// 	&nodeListAfterReconnect,
+	// )
+	// assert.Nil(t, err)
+	// assert.Len(t, nodeListAfterReconnect, 3)
+	// assert.True(t, nodeListAfterReconnect[0].Online)
+	// assert.True(t, nodeListAfterReconnect[1].Online)
+	// assert.True(t, nodeListAfterReconnect[2].Online)
 }
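Note: the test settles with a fixed time.Sleep before asserting. If that proves flaky, one alternative (not what this commit does) is to poll with testify's assert.EventuallyWithT, retrying the assertions until a timeout. A hypothetical sketch against the same helpers:

    // Poll instead of sleeping a fixed five seconds (hypothetical).
    assert.EventuallyWithT(t, func(c *assert.CollectT) {
    	var routes []*v1.Route
    	err := executeAndUnmarshal(
    		headscale,
    		[]string{"headscale", "routes", "list", "--output", "json"},
    		&routes,
    	)
    	assert.NoError(c, err)
    	// Guard the index so a short list fails cleanly instead of panicking.
    	if assert.Len(c, routes, 2) {
    		assert.True(c, routes[1].GetIsPrimary())
    	}
    }, 10*time.Second, 500*time.Millisecond)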
@@ -650,8 +650,8 @@ func (s *Scenario) WaitForTailscaleLogout() error {
 	return nil
 }
 
-// DisconnectContainers disconnects a list of containers from the network.
-func (s *Scenario) DisconnectContainers(containers ...string) error {
+// DisconnectContainersFromScenario disconnects a list of containers from the network.
+func (s *Scenario) DisconnectContainersFromScenario(containers ...string) error {
 	for _, container := range containers {
 		if ctr, ok := s.pool.ContainerByName(container); ok {
 			err := ctr.DisconnectFromNetwork(s.network)
@@ -663,3 +663,17 @@ func (s *Scenario) DisconnectContainers(containers ...string) error {
 
 	return nil
 }
+
+// ConnectContainersToScenario connects a list of containers to the network.
+func (s *Scenario) ConnectContainersToScenario(containers ...string) error {
+	for _, container := range containers {
+		if ctr, ok := s.pool.ContainerByName(container); ok {
+			err := ctr.ConnectToNetwork(s.network)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
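Note: with the rename, Scenario now exposes a symmetric pair for partitioning and healing the test network. A minimal usage sketch (the surrounding test is hypothetical; the identifiers come from this diff):

    // Simulate a network partition for one container, then heal it.
    err := scenario.DisconnectContainersFromScenario(subRouter1.Hostname())
    assertNoErr(t, err)

    // ... assert failover behaviour while the node is offline ...

    err = scenario.ConnectContainersToScenario(subRouter1.Hostname())
    assertNoErr(t, err)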