Fix golangcilint

Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
Kristoffer Dalby 2022-10-18 12:19:43 +02:00
parent 8502a0acda
commit 12ee9bc02d
4 changed files with 33 additions and 32 deletions

View file

@@ -51,7 +51,7 @@ func TestPingAll(t *testing.T) {
 		allClients = append(allClients, clients...)
 	}
 
-	scenario.WaitForTailscaleSync()
+	err = scenario.WaitForTailscaleSync()
 	if err != nil {
 		t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
 	}
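This hunk assigns the previously ignored return value of WaitForTailscaleSync so that the existing error check actually fires, the kind of issue golangci-lint's errcheck rule reports. A minimal sketch of the same pattern with a locally scoped error (an illustrative variant, not the exact change above):

// Illustrative sketch: check the returned error instead of discarding it.
if err := scenario.WaitForTailscaleSync(); err != nil {
	t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
}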

View file

@@ -135,7 +135,7 @@ func (t *HeadscaleInContainer) WaitForReady() error {
 	log.Printf("waiting for headscale to be ready at %s", url)
 
 	return t.pool.Retry(func() error {
-		resp, err := http.Get(url)
+		resp, err := http.Get(url) //nolint
 		if err != nil {
 			return fmt.Errorf("headscale is not ready: %w", err)
 		}
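The //nolint directive presumably silences warnings such as noctx (an http.Get call without a request context) or bodyclose. A context-aware request is the usual non-suppressed alternative; this is only a sketch (assuming the "context" import), not what the commit does:

// Sketch of a context-aware readiness probe inside the Retry closure.
req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, url, nil)
if err != nil {
	return fmt.Errorf("creating readiness request: %w", err)
}

resp, err := http.DefaultClient.Do(req)
if err != nil {
	return fmt.Errorf("headscale is not ready: %w", err)
}
defer resp.Body.Close()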

View file

@@ -18,6 +18,8 @@ import (
 )
 
 const scenarioHashLength = 6
+const maxWait = 60 * time.Second
+const headscalePort = 8080
 
 var (
 	errNoHeadscaleAvailable = errors.New("no headscale available")
@@ -75,7 +77,7 @@ func NewScenario() (*Scenario, error) {
 		return nil, fmt.Errorf("could not connect to docker: %w", err)
 	}
 
-	pool.MaxWait = 60 * time.Second
+	pool.MaxWait = maxWait
 
 	networkName := fmt.Sprintf("hs-%s", hash)
 	if overrideNetworkName := os.Getenv("HEADSCALE_TEST_NETWORK_NAME"); overrideNetworkName != "" {
@@ -139,7 +141,7 @@ func (s *Scenario) Shutdown() error {
 
 // TODO(kradalby): make port and headscale configurable, multiple instances support?
 func (s *Scenario) StartHeadscale() error {
-	headscale, err := hsic.New(s.pool, 8080, s.network)
+	headscale, err := hsic.New(s.pool, headscalePort, s.network)
 	if err != nil {
 		return fmt.Errorf("failed to create headscale container: %w", err)
 	}
@@ -150,6 +152,7 @@ func (s *Scenario) StartHeadscale() error {
 }
 
 func (s *Scenario) Headscale() *hsic.HeadscaleInContainer {
+	//nolint
	 return s.controlServers["headscale"].(*hsic.HeadscaleInContainer)
 }
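The //nolint here most likely quiets a forcetypeassert-style warning on the unchecked type assertion. A checked, comma-ok variant would change the method's signature, which may be why suppression was chosen; sketched below using the errNoHeadscaleAvailable sentinel already declared in this file:

// Sketch only: checked assertion with an error return instead of //nolint.
func (s *Scenario) Headscale() (*hsic.HeadscaleInContainer, error) {
	headscale, ok := s.controlServers["headscale"].(*hsic.HeadscaleInContainer)
	if !ok {
		return nil, errNoHeadscaleAvailable
	}

	return headscale, nil
}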
@@ -186,33 +189,33 @@ func (s *Scenario) CreateNamespace(namespace string) error {
 /// Client related stuff
 
 func (s *Scenario) CreateTailscaleNodesInNamespace(
-	namespace string,
+	namespaceStr string,
 	requestedVersion string,
 	count int,
 ) error {
-	if ns, ok := s.namespaces[namespace]; ok {
+	if namespace, ok := s.namespaces[namespaceStr]; ok {
 		for i := 0; i < count; i++ {
 			version := requestedVersion
 			if requestedVersion == "all" {
 				version = TailscaleVersions[i%len(TailscaleVersions)]
 			}
 
-			ns.createWaitGroup.Add(1)
+			namespace.createWaitGroup.Add(1)
 
 			go func() {
-				defer ns.createWaitGroup.Done()
+				defer namespace.createWaitGroup.Done()
 
 				// TODO(kradalby): error handle this
-				ts, err := tsic.New(s.pool, version, s.network)
+				tsClient, err := tsic.New(s.pool, version, s.network)
 				if err != nil {
 					// return fmt.Errorf("failed to add tailscale node: %w", err)
-					fmt.Printf("failed to add tailscale node: %s", err)
+					log.Printf("failed to add tailscale node: %s", err)
 				}
 
-				ns.Clients[ts.Hostname] = ts
+				namespace.Clients[tsClient.Hostname] = tsClient
 			}()
 		}
 
-		ns.createWaitGroup.Wait()
+		namespace.createWaitGroup.Wait()
 
 		return nil
 	}
@@ -221,20 +224,20 @@ func (s *Scenario) CreateTailscaleNodesInNamespace(
 }
 
 func (s *Scenario) RunTailscaleUp(
-	namespace, loginServer, authKey string,
+	namespaceStr, loginServer, authKey string,
 ) error {
-	if ns, ok := s.namespaces[namespace]; ok {
-		for _, client := range ns.Clients {
-			ns.joinWaitGroup.Add(1)
+	if namespace, ok := s.namespaces[namespaceStr]; ok {
+		for _, client := range namespace.Clients {
+			namespace.joinWaitGroup.Add(1)
 
 			go func(c *tsic.TailscaleInContainer) {
-				defer ns.joinWaitGroup.Done()
+				defer namespace.joinWaitGroup.Done()
 
 				// TODO(kradalby): error handle this
 				_ = c.Up(loginServer, authKey)
 			}(client)
 		}
 
-		ns.joinWaitGroup.Wait()
+		namespace.joinWaitGroup.Wait()
 
 		return nil
 	}
@@ -245,8 +248,8 @@ func (s *Scenario) RunTailscaleUp(
 func (s *Scenario) CountTailscale() int {
 	count := 0
 
-	for _, ns := range s.namespaces {
-		count += len(ns.Clients)
+	for _, namespace := range s.namespaces {
+		count += len(namespace.Clients)
 	}
 
 	return count
@@ -255,18 +258,18 @@ func (s *Scenario) CountTailscale() int {
 func (s *Scenario) WaitForTailscaleSync() error {
 	tsCount := s.CountTailscale()
 
-	for _, ns := range s.namespaces {
-		for _, client := range ns.Clients {
-			ns.syncWaitGroup.Add(1)
+	for _, namespace := range s.namespaces {
+		for _, client := range namespace.Clients {
+			namespace.syncWaitGroup.Add(1)
 
 			go func(c *tsic.TailscaleInContainer) {
-				defer ns.syncWaitGroup.Done()
+				defer namespace.syncWaitGroup.Done()
 
 				// TODO(kradalby): error handle this
 				_ = c.WaitForPeers(tsCount)
 			}(client)
 		}
 
-		ns.syncWaitGroup.Wait()
+		namespace.syncWaitGroup.Wait()
 	}
 
 	return nil
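The renames above leave the fan-out pattern intact: one goroutine per client joined by a sync.WaitGroup, with WaitForPeers errors still dropped per the TODO. If that TODO were addressed, an errgroup-based variant is one hedged possibility (golang.org/x/sync/errgroup, not used by this commit):

// Sketch: propagate the first WaitForPeers failure instead of discarding it.
var eg errgroup.Group

for _, client := range namespace.Clients {
	c := client // capture the loop variable for the closure
	eg.Go(func() error {
		return c.WaitForPeers(tsCount)
	})
}

if err := eg.Wait(); err != nil {
	return fmt.Errorf("clients did not sync: %w", err)
}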

View file

@@ -22,8 +22,9 @@ const (
 )
 
 var (
 	errTailscalePingFailed  = errors.New("ping failed")
 	errTailscaleNotLoggedIn = errors.New("tailscale not logged in")
+	errTailscaleWrongPeerCount = errors.New("wrong peer count")
 )
 
 type TailscaleInContainer struct {
@@ -207,11 +208,7 @@ func (t *TailscaleInContainer) WaitForPeers(expected int) error {
 	}
 
 	if peers := status.Peers(); len(peers) != expected {
-		return fmt.Errorf(
-			"tailscale client does not have the expected clients: %d out of %d",
-			len(peers),
-			expected,
-		)
+		return errTailscaleWrongPeerCount
 	}
 
 	return nil
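Returning the static errTailscaleWrongPeerCount satisfies linters such as goerr113, which flag dynamic errors built with fmt.Errorf, but it drops the peer counts the old message carried. Wrapping the sentinel keeps both; a sketch, not part of this commit:

// Sketch: wrap the sentinel so errors.Is still matches it while the message
// retains the observed and expected peer counts.
if peers := status.Peers(); len(peers) != expected {
	return fmt.Errorf(
		"%w: %d out of %d",
		errTailscaleWrongPeerCount,
		len(peers),
		expected,
	)
}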
@@ -241,6 +238,7 @@ func (t *TailscaleInContainer) Ping(ip netip.Addr) error {
 			ip.String(),
 			err,
 		)
+
 		return err
 	}