Mirror of https://github.com/juanfont/headscale.git, synced 2024-11-26 08:53:05 +00:00
Working on migrating DERP tests to scenarios

This commit is contained in:
parent 353f191e4f
commit b36df48d78

2 changed files with 230 additions and 1 deletion
integration/derp_server_test.go (new file, 221 lines)
@@ -0,0 +1,221 @@
package integration

import (
    "fmt"
    "log"
    "net/netip"
    "testing"

    "github.com/juanfont/headscale"
    "github.com/juanfont/headscale/integration/dockertestutil"
    "github.com/juanfont/headscale/integration/hsic"
    "github.com/juanfont/headscale/integration/tsic"
    "github.com/ory/dockertest/v3"
    "github.com/samber/lo"
)
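// DERPServerScenario wraps the base Scenario but gives every tailscale node
// its own isolated Docker network, so clients cannot reach each other
// directly and their traffic is expected to be relayed through headscale's
// embedded DERP server.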
type DERPServerScenario struct {
    *Scenario

    tsicNetworks map[string]*dockertest.Network
}
func TestDERPServerScenario(t *testing.T) {
    IntegrationSkip(t)
    t.Parallel()

    baseScenario, err := NewScenario()
    if err != nil {
        t.Errorf("failed to create scenario: %s", err)
    }

    scenario := DERPServerScenario{
        Scenario:     baseScenario,
        tsicNetworks: map[string]*dockertest.Network{},
    }

    spec := map[string]int{
        "user1": len(TailscaleVersions),
    }

    headscaleConfig := hsic.DefaultConfigEnv()
    // headscaleConfig["HEADSCALE_DERP_URLS"] = ""
    // headscaleConfig["HEADSCALE_DERP_SERVER_ENABLED"] = "true"
    // headscaleConfig["HEADSCALE_DERP_SERVER_REGION_ID"] = "999"
    // headscaleConfig["HEADSCALE_DERP_SERVER_REGION_CODE"] = "headscale"
    // headscaleConfig["HEADSCALE_DERP_SERVER_REGION_NAME"] = "Headscale Embedded DERP"
    // headscaleConfig["HEADSCALE_DERP_SERVER_STUN_LISTEN_ADDR"] = "0.0.0.0:3478"
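    // NOTE: the environment variables above are the ones that enable the
    // embedded DERP server; they are still commented out in this
    // work-in-progress commit. The 3478/udp port exposed below appears to
    // correspond to the STUN listen address of the embedded DERP server.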
    err = scenario.CreateHeadscaleEnv(
        spec,
        hsic.WithHostnameAsServerURL(),
        hsic.WithTestName("derpserver"),
        hsic.WithConfigEnv(headscaleConfig),
        hsic.WithExtraPorts([]string{"3478/udp"}),
        hsic.WithTLS(),
    )

    if err != nil {
        t.Errorf("failed to create headscale environment: %s", err)
    }

    allClients, err := scenario.ListTailscaleClients()
    if err != nil {
        t.Errorf("failed to get clients: %s", err)
    }

    allIps, err := scenario.ListTailscaleClientsIPs()
    if err != nil {
        t.Errorf("failed to get client IPs: %s", err)
    }

    err = scenario.WaitForTailscaleSync()
    if err != nil {
        t.Errorf("failed to wait for tailscale clients to be in sync: %s", err)
    }

    allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
        return x.String()
    })
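    // Because every node sits on its own isolated network, a successful ping
    // between two nodes implies the traffic was relayed (presumably via the
    // embedded DERP server) rather than sent over a direct connection.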
    success := pingAllHelper(t, allClients, allAddrs)
    t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))

    err = scenario.Shutdown()
    if err != nil {
        t.Errorf("failed to tear down scenario: %s", err)
    }
}
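// CreateHeadscaleEnv starts the headscale container and then, for every user
// in the spec, creates the user, its isolated tailscale nodes, and a pre-auth
// key that is used to log the nodes in.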
func (s *DERPServerScenario) CreateHeadscaleEnv(
    users map[string]int,
    opts ...hsic.Option,
) error {
    hs, err := s.Headscale(opts...)
    if err != nil {
        return err
    }

    err = hs.WaitForReady()
    if err != nil {
        return err
    }

    hash, err := headscale.GenerateRandomStringDNSSafe(scenarioHashLength)
    if err != nil {
        return err
    }

    for userName, clientCount := range users {
        log.Printf("creating user %s with %d clients", userName, clientCount)
        err = s.CreateUser(userName)
        if err != nil {
            return err
        }

        err = s.CreateTailscaleIsolatedNodesInUser(hash, userName, "all", clientCount)
        if err != nil {
            return err
        }

        key, err := s.CreatePreAuthKey(userName, true, false)
        if err != nil {
            return err
        }

        err = s.RunTailscaleUp(userName, hs.GetEndpoint(), key.GetKey())
        if err != nil {
            return err
        }
    }

    return nil
}
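// CreateTailscaleIsolatedNodesInUser creates count tailscale nodes for the
// given user and attaches each of them to its own Docker network, so the
// nodes have no direct network path to each other.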
func (s *DERPServerScenario) CreateTailscaleIsolatedNodesInUser(
    hash string,
    userStr string,
    requestedVersion string,
    count int,
    opts ...tsic.Option,
) error {
    if user, ok := s.users[userStr]; ok {
        for i := 0; i < count; i++ {
            networkName := fmt.Sprintf("tsnet-%s-%s-%d",
                hash,
                userStr,
                i,
            )
            network, err := dockertestutil.GetFirstOrCreateNetwork(
                s.pool,
                networkName,
            )
            if err != nil {
                return fmt.Errorf("failed to create or get %s network: %w", networkName, err)
            }

            log.Printf("created network %s", networkName)

            s.tsicNetworks[networkName] = network

            version := requestedVersion
            if requestedVersion == "all" {
                version = TailscaleVersions[i%len(TailscaleVersions)]
            }

            headscale, err := s.Headscale()
            if err != nil {
                return fmt.Errorf("failed to get headscale: %w", err)
            }

            cert := headscale.GetCert()
            hostname := headscale.GetHostname()

            user.createWaitGroup.Add(1)

            opts = append(opts,
                tsic.WithHeadscaleTLS(cert),
                tsic.WithHeadscaleName(hostname),
            )

            go func() {
                defer user.createWaitGroup.Done()

                // TODO(kradalby): error handle this
                // (if tsic.New fails, tsClient is nil and the calls below will panic)
                tsClient, err := tsic.New(
                    s.pool,
                    version,
                    network,
                    opts...,
                )
                if err != nil {
                    // return fmt.Errorf("failed to add tailscale node: %w", err)
                    log.Printf("failed to create tailscale node: %s", err)
                }

                err = tsClient.WaitForReady()
                if err != nil {
                    // return fmt.Errorf("failed to add tailscale node: %w", err)
                    log.Printf("failed to wait for tailscaled: %s", err)
                }

                user.Clients[tsClient.Hostname()] = tsClient
            }()
        }
        user.createWaitGroup.Wait()

        return nil
    }

    return fmt.Errorf("failed to add tailscale node: %w", errNoUserAvailable)
}
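// Shutdown removes the per-node Docker networks created by this scenario and
// then tears down the underlying Scenario.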
func (s *DERPServerScenario) Shutdown() error {
    for _, network := range s.tsicNetworks {
        err := s.pool.RemoveNetwork(network)
        if err != nil {
            return err
        }
    }

    return s.Scenario.Shutdown()
}
integration/hsic/hsic.go

@@ -52,6 +52,7 @@ type HeadscaleInContainer struct {
 
 	// optional config
 	port       int
+	extraPorts []string
 	aclPolicy  *headscale.ACLPolicy
 	env        map[string]string
 	tlsCert    []byte
@@ -108,6 +109,13 @@ func WithPort(port int) Option {
 	}
 }
 
+// WithExtraPorts exposes additional ports on the container (e.g. 3478/udp for STUN)
+func WithExtraPorts(ports []string) Option {
+	return func(hsic *HeadscaleInContainer) {
+		hsic.extraPorts = ports
+	}
+}
+
 // WithTestName sets a name for the test; this will be reflected
 // in the Docker container name.
 func WithTestName(testName string) Option {
@@ -187,7 +195,7 @@ func New(
 
 	runOptions := &dockertest.RunOptions{
 		Name:         hsic.hostname,
-		ExposedPorts: []string{portProto},
+		ExposedPorts: append([]string{portProto}, hsic.extraPorts...),
 		Networks:     []*dockertest.Network{network},
 		// Cmd: []string{"headscale", "serve"},
 		// TODO(kradalby): Get rid of this hack, we currently need to give us some