Merge branch 'main' into fix-segfault-when-not-runner
commit f2f4c3f684
2 changed files with 30 additions and 9 deletions
CHANGELOG.md

@@ -30,7 +30,8 @@
 - Add -c option to specify config file from command line [#285](https://github.com/juanfont/headscale/issues/285) [#612](https://github.com/juanfont/headscale/pull/601)
 - Add configuration option to allow Tailscale clients to use a random WireGuard port. [kb/1181/firewalls](https://tailscale.com/kb/1181/firewalls) [#624](https://github.com/juanfont/headscale/pull/624)
 - Improve obtuse UX regarding missing configuration (`ephemeral_node_inactivity_timeout` not set) [#639](https://github.com/juanfont/headscale/pull/639)
 - Fix nodes being shown as 'offline' in `tailscale status` [#648](https://github.com/juanfont/headscale/pull/648)
+- Improve shutdown behaviour [#651](https://github.com/juanfont/headscale/pull/651)

 ## 0.15.0 (2022-03-20)
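As a side note on the `-c` entry above: headscale's actual CLI is built on cobra/viper, but the idea of overriding the config path from the command line can be sketched with nothing more than the standard library. All names and the default path below are illustrative, not headscale code:

```go
// Hypothetical sketch of a "-c" config-file flag; not headscale's implementation.
package main

import (
	"flag"
	"fmt"
	"os"
)

func main() {
	// -c lets the caller point at a non-default config file.
	configPath := flag.String("c", "/etc/headscale/config.yaml", "path to the config file")
	flag.Parse()

	if _, err := os.Stat(*configPath); err != nil {
		fmt.Fprintf(os.Stderr, "config file %q is not readable: %v\n", *configPath, err)
		os.Exit(1)
	}
	fmt.Println("using config:", *configPath)
}
```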
app.go (36 lines changed)
@@ -54,12 +54,13 @@ const (
 )

 const (
 	AuthPrefix = "Bearer "
 	Postgres   = "postgres"
 	Sqlite     = "sqlite3"
 	updateInterval  = 5000
 	HTTPReadTimeout = 30 * time.Second
+	HTTPShutdownTimeout = 3 * time.Second
 	privateKeyFileMode = 0o600

 	registerCacheExpiration = time.Minute * 15
 	registerCacheCleanup    = time.Minute * 20
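The new `HTTPShutdownTimeout` constant bounds how long `http.Server.Shutdown` waits for in-flight requests; the hunk below wires it into `Serve()`. A minimal, self-contained sketch of that pattern, using illustrative stand-in names rather than headscale's own:

```go
// Minimal sketch: bounding http.Server.Shutdown with a timeout, the pattern
// that the HTTPShutdownTimeout constant above enables. Names are illustrative.
package main

import (
	"context"
	"errors"
	"log"
	"net/http"
	"time"
)

const (
	httpReadTimeout     = 30 * time.Second // stand-in for HTTPReadTimeout
	httpShutdownTimeout = 3 * time.Second  // stand-in for HTTPShutdownTimeout
)

func main() {
	srv := &http.Server{
		Addr:        ":8080",
		ReadTimeout: httpReadTimeout,
	}

	go func() {
		if err := srv.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
			log.Printf("serve error: %v", err)
		}
	}()

	// On a stop signal, give in-flight requests a bounded amount of time.
	ctx, cancel := context.WithTimeout(context.Background(), httpShutdownTimeout)
	defer cancel()
	if err := srv.Shutdown(ctx); err != nil {
		// Shutdown returns the context error if the deadline expires first.
		log.Printf("forced shutdown: %v", err)
	}
}
```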
@@ -668,8 +669,13 @@ func (h *Headscale) Serve() error {
 				Msg("Received signal to stop, shutting down gracefully")

 			// Gracefully shut down servers
-			promHTTPServer.Shutdown(ctx)
-			httpServer.Shutdown(ctx)
+			ctx, cancel := context.WithTimeout(context.Background(), HTTPShutdownTimeout)
+			if err := promHTTPServer.Shutdown(ctx); err != nil {
+				log.Error().Err(err).Msg("Failed to shutdown prometheus http")
+			}
+			if err := httpServer.Shutdown(ctx); err != nil {
+				log.Error().Err(err).Msg("Failed to shutdown http")
+			}
 			grpcSocket.GracefulStop()

 			// Close network listeners
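Unlike `http.Server.Shutdown`, `grpcSocket.GracefulStop()` takes no context, so it can block for as long as a client keeps a stream open; this diff simply relies on the later `os.Exit(0)`. A common complementary pattern, shown here only as a sketch and not as what headscale does, is to race `GracefulStop` against a timer and fall back to `Stop`:

```go
// Common pattern (not used in this diff): bound grpc.Server.GracefulStop with
// a timeout and fall back to a hard Stop if draining takes too long.
package main

import (
	"time"

	"google.golang.org/grpc"
)

// stopGRPC drains srv gracefully but forces a hard stop after timeout.
func stopGRPC(srv *grpc.Server, timeout time.Duration) {
	done := make(chan struct{})
	go func() {
		srv.GracefulStop() // blocks until in-flight RPCs finish
		close(done)
	}()

	select {
	case <-done:
		// Drained cleanly.
	case <-time.After(timeout):
		srv.Stop() // give up waiting and close all connections
	}
}

func main() {
	srv := grpc.NewServer()
	// ... register services and start serving on a listener ...
	stopGRPC(srv, 3*time.Second)
}
```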
@@ -680,7 +686,21 @@ func (h *Headscale) Serve() error {
 			// Stop listening (and unlink the socket if unix type):
 			socketListener.Close()

+			// Close db connections
+			db, err := h.db.DB()
+			if err != nil {
+				log.Error().Err(err).Msg("Failed to get db handle")
+			}
+			err = db.Close()
+			if err != nil {
+				log.Error().Err(err).Msg("Failed to close db")
+			}
+
+			log.Info().
+				Msg("Headscale stopped")
+
 			// And we're done:
+			cancel()
 			os.Exit(0)
 		}
 	}
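The added block closes the database explicitly. The `h.db.DB()` call suggests gorm v2, which exposes no `Close` method of its own, so the underlying `*sql.DB` handle is fetched and closed instead. A standalone sketch of the same pattern, assuming an in-memory SQLite-backed gorm handle purely for illustration:

```go
// Sketch of the close-on-shutdown pattern added above: gorm v2 has no Close
// method, so the underlying database/sql handle is fetched and closed instead.
package main

import (
	"log"

	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
)

func main() {
	// Open an in-memory SQLite database purely for illustration.
	gormDB, err := gorm.Open(sqlite.Open("file::memory:?cache=shared"), &gorm.Config{})
	if err != nil {
		log.Fatalf("failed to open db: %v", err)
	}

	// ... use gormDB ...

	sqlDB, err := gormDB.DB() // underlying *sql.DB
	if err != nil {
		log.Printf("failed to get db handle: %v", err)
		return
	}
	if err := sqlDB.Close(); err != nil {
		log.Printf("failed to close db: %v", err)
	}
}
```

The sketch returns early when the handle cannot be obtained, since `Close` would otherwise be called on a nil handle.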