From 9047c09871dec7f84a00311e3ee14365f870810d Mon Sep 17 00:00:00 2001 From: Pallab Pain Date: Fri, 9 Feb 2024 22:04:28 +0530 Subject: [PATCH] =?UTF-8?q?=E2=9C=A8=20feat:=20add=20pqsql=20configs=20for?= =?UTF-8?q?=20open=20and=20idle=20connections=20(#1583)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When Postgres is used as the backing database for headscale, it does not set a limit on maximum open and idle connections which leads to hundreds of open connections to the Postgres server. This commit introduces the configuration variables to set those values and also sets defaults while opening a new postgres connection. --- CHANGELOG.md | 1 + config-example.yaml | 3 + hscontrol/db/db.go | 478 ++++++++++++++++++++------------------ hscontrol/types/config.go | 47 ++-- 4 files changed, 287 insertions(+), 242 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3adb23be..9c1d04b9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -46,6 +46,7 @@ after improving the test harness as part of adopting [#1460](https://github.com/ - Added the possibility to manually create a DERP-map entry which can be customized, instead of automatically creating it. [#1565](https://github.com/juanfont/headscale/pull/1565) - Change the structure of database configuration, see [config-example.yaml](./config-example.yaml) for the new structure. [#1700](https://github.com/juanfont/headscale/pull/1700) - Old structure is now considered deprecated and will be removed in the future. + - Adds additional configuration for PostgreSQL for setting max open, idle connection and idle connection lifetime. 
## 0.22.3 (2023-05-12) diff --git a/config-example.yaml b/config-example.yaml index 8e4373fc..d41771f9 100644 --- a/config-example.yaml +++ b/config-example.yaml @@ -153,6 +153,9 @@ database: # name: headscale # user: foo # pass: bar + # max_open_conns: 10 + # max_idle_conns: 10 + # conn_max_idle_time_secs: 3600 # # If other 'sslmode' is required instead of 'require(true)' and 'disabled(false)', set the 'sslmode' you need # # in the 'db_ssl' field. Refers to https://www.postgresql.org/docs/current/libpq-ssl.html Table 34.1. diff --git a/hscontrol/db/db.go b/hscontrol/db/db.go index fe77dda8..4ded07fb 100644 --- a/hscontrol/db/db.go +++ b/hscontrol/db/db.go @@ -12,13 +12,14 @@ import ( "github.com/glebarez/sqlite" "github.com/go-gormigrate/gormigrate/v2" - "github.com/juanfont/headscale/hscontrol/notifier" - "github.com/juanfont/headscale/hscontrol/types" - "github.com/juanfont/headscale/hscontrol/util" "github.com/rs/zerolog/log" "gorm.io/driver/postgres" "gorm.io/gorm" "gorm.io/gorm/logger" + + "github.com/juanfont/headscale/hscontrol/notifier" + "github.com/juanfont/headscale/hscontrol/types" + "github.com/juanfont/headscale/hscontrol/util" ) var errDatabaseNotSupported = errors.New("database type not supported") @@ -50,259 +51,273 @@ func NewHeadscaleDatabase( return nil, err } - migrations := gormigrate.New(dbConn, gormigrate.DefaultOptions, []*gormigrate.Migration{ - // New migrations should be added as transactions at the end of this list. - // The initial commit here is quite messy, completely out of order and - // has no versioning and is the tech debt of not having versioned migrations - // prior to this point. This first migration is all DB changes to bring a DB - // up to 0.23.0. 
- { - ID: "202312101416", - Migrate: func(tx *gorm.DB) error { - if cfg.Type == types.DatabasePostgres { - tx.Exec(`create extension if not exists "uuid-ossp";`) - } - - _ = tx.Migrator().RenameTable("namespaces", "users") - - // the big rename from Machine to Node - _ = tx.Migrator().RenameTable("machines", "nodes") - _ = tx.Migrator().RenameColumn(&types.Route{}, "machine_id", "node_id") - - err = tx.AutoMigrate(types.User{}) - if err != nil { - return err - } - - _ = tx.Migrator().RenameColumn(&types.Node{}, "namespace_id", "user_id") - _ = tx.Migrator().RenameColumn(&types.PreAuthKey{}, "namespace_id", "user_id") - - _ = tx.Migrator().RenameColumn(&types.Node{}, "ip_address", "ip_addresses") - _ = tx.Migrator().RenameColumn(&types.Node{}, "name", "hostname") - - // GivenName is used as the primary source of DNS names, make sure - // the field is populated and normalized if it was not when the - // node was registered. - _ = tx.Migrator().RenameColumn(&types.Node{}, "nickname", "given_name") - - // If the Node table has a column for registered, - // find all occourences of "false" and drop them. Then - // remove the column. - if tx.Migrator().HasColumn(&types.Node{}, "registered") { - log.Info(). - Msg(`Database has legacy "registered" column in node, removing...`) - - nodes := types.Nodes{} - if err := tx.Not("registered").Find(&nodes).Error; err != nil { - log.Error().Err(err).Msg("Error accessing db") + migrations := gormigrate.New( + dbConn, + gormigrate.DefaultOptions, + []*gormigrate.Migration{ + // New migrations should be added as transactions at the end of this list. + // The initial commit here is quite messy, completely out of order and + // has no versioning and is the tech debt of not having versioned migrations + // prior to this point. This first migration is all DB changes to bring a DB + // up to 0.23.0. 
+ { + ID: "202312101416", + Migrate: func(tx *gorm.DB) error { + if cfg.Type == types.DatabasePostgres { + tx.Exec(`create extension if not exists "uuid-ossp";`) } - for _, node := range nodes { - log.Info(). - Str("node", node.Hostname). - Str("machine_key", node.MachineKey.ShortString()). - Msg("Deleting unregistered node") - if err := tx.Delete(&types.Node{}, node.ID).Error; err != nil { - log.Error(). - Err(err). - Str("node", node.Hostname). - Str("machine_key", node.MachineKey.ShortString()). - Msg("Error deleting unregistered node") - } - } + _ = tx.Migrator().RenameTable("namespaces", "users") - err := tx.Migrator().DropColumn(&types.Node{}, "registered") - if err != nil { - log.Error().Err(err).Msg("Error dropping registered column") - } - } + // the big rename from Machine to Node + _ = tx.Migrator().RenameTable("machines", "nodes") + _ = tx.Migrator(). + RenameColumn(&types.Route{}, "machine_id", "node_id") - err = tx.AutoMigrate(&types.Route{}) - if err != nil { - return err - } - - err = tx.AutoMigrate(&types.Node{}) - if err != nil { - return err - } - - // Ensure all keys have correct prefixes - // https://github.com/tailscale/tailscale/blob/main/types/key/node.go#L35 - type result struct { - ID uint64 - MachineKey string - NodeKey string - DiscoKey string - } - var results []result - err = tx.Raw("SELECT id, node_key, machine_key, disco_key FROM nodes"). - Find(&results). 
- Error - if err != nil { - return err - } - - for _, node := range results { - mKey := node.MachineKey - if !strings.HasPrefix(node.MachineKey, "mkey:") { - mKey = "mkey:" + node.MachineKey - } - nKey := node.NodeKey - if !strings.HasPrefix(node.NodeKey, "nodekey:") { - nKey = "nodekey:" + node.NodeKey - } - - dKey := node.DiscoKey - if !strings.HasPrefix(node.DiscoKey, "discokey:") { - dKey = "discokey:" + node.DiscoKey - } - - err := tx.Exec( - "UPDATE nodes SET machine_key = @mKey, node_key = @nKey, disco_key = @dKey WHERE ID = @id", - sql.Named("mKey", mKey), - sql.Named("nKey", nKey), - sql.Named("dKey", dKey), - sql.Named("id", node.ID), - ).Error + err = tx.AutoMigrate(types.User{}) if err != nil { return err } - } - if tx.Migrator().HasColumn(&types.Node{}, "enabled_routes") { - log.Info(). - Msgf("Database has legacy enabled_routes column in node, migrating...") + _ = tx.Migrator(). + RenameColumn(&types.Node{}, "namespace_id", "user_id") + _ = tx.Migrator(). + RenameColumn(&types.PreAuthKey{}, "namespace_id", "user_id") - type NodeAux struct { - ID uint64 - EnabledRoutes types.IPPrefixes - } + _ = tx.Migrator(). + RenameColumn(&types.Node{}, "ip_address", "ip_addresses") + _ = tx.Migrator().RenameColumn(&types.Node{}, "name", "hostname") - nodesAux := []NodeAux{} - err := tx.Table("nodes").Select("id, enabled_routes").Scan(&nodesAux).Error - if err != nil { - log.Fatal().Err(err).Msg("Error accessing db") - } - for _, node := range nodesAux { - for _, prefix := range node.EnabledRoutes { - if err != nil { + // GivenName is used as the primary source of DNS names, make sure + // the field is populated and normalized if it was not when the + // node was registered. + _ = tx.Migrator(). + RenameColumn(&types.Node{}, "nickname", "given_name") + + // If the Node table has a column for registered, + // find all occourences of "false" and drop them. Then + // remove the column. + if tx.Migrator().HasColumn(&types.Node{}, "registered") { + log.Info(). 
+ Msg(`Database has legacy "registered" column in node, removing...`) + + nodes := types.Nodes{} + if err := tx.Not("registered").Find(&nodes).Error; err != nil { + log.Error().Err(err).Msg("Error accessing db") + } + + for _, node := range nodes { + log.Info(). + Str("node", node.Hostname). + Str("machine_key", node.MachineKey.ShortString()). + Msg("Deleting unregistered node") + if err := tx.Delete(&types.Node{}, node.ID).Error; err != nil { log.Error(). Err(err). - Str("enabled_route", prefix.String()). - Msg("Error parsing enabled_route") - - continue + Str("node", node.Hostname). + Str("machine_key", node.MachineKey.ShortString()). + Msg("Error deleting unregistered node") } + } - err = tx.Preload("Node"). - Where("node_id = ? AND prefix = ?", node.ID, types.IPPrefix(prefix)). - First(&types.Route{}). - Error - if err == nil { - log.Info(). - Str("enabled_route", prefix.String()). - Msg("Route already migrated to new table, skipping") + err := tx.Migrator().DropColumn(&types.Node{}, "registered") + if err != nil { + log.Error().Err(err).Msg("Error dropping registered column") + } + } - continue + err = tx.AutoMigrate(&types.Route{}) + if err != nil { + return err + } + + err = tx.AutoMigrate(&types.Node{}) + if err != nil { + return err + } + + // Ensure all keys have correct prefixes + // https://github.com/tailscale/tailscale/blob/main/types/key/node.go#L35 + type result struct { + ID uint64 + MachineKey string + NodeKey string + DiscoKey string + } + var results []result + err = tx.Raw("SELECT id, node_key, machine_key, disco_key FROM nodes"). + Find(&results). 
+ Error + if err != nil { + return err + } + + for _, node := range results { + mKey := node.MachineKey + if !strings.HasPrefix(node.MachineKey, "mkey:") { + mKey = "mkey:" + node.MachineKey + } + nKey := node.NodeKey + if !strings.HasPrefix(node.NodeKey, "nodekey:") { + nKey = "nodekey:" + node.NodeKey + } + + dKey := node.DiscoKey + if !strings.HasPrefix(node.DiscoKey, "discokey:") { + dKey = "discokey:" + node.DiscoKey + } + + err := tx.Exec( + "UPDATE nodes SET machine_key = @mKey, node_key = @nKey, disco_key = @dKey WHERE ID = @id", + sql.Named("mKey", mKey), + sql.Named("nKey", nKey), + sql.Named("dKey", dKey), + sql.Named("id", node.ID), + ).Error + if err != nil { + return err + } + } + + if tx.Migrator().HasColumn(&types.Node{}, "enabled_routes") { + log.Info(). + Msgf("Database has legacy enabled_routes column in node, migrating...") + + type NodeAux struct { + ID uint64 + EnabledRoutes types.IPPrefixes + } + + nodesAux := []NodeAux{} + err := tx.Table("nodes"). + Select("id, enabled_routes"). + Scan(&nodesAux). + Error + if err != nil { + log.Fatal().Err(err).Msg("Error accessing db") + } + for _, node := range nodesAux { + for _, prefix := range node.EnabledRoutes { + if err != nil { + log.Error(). + Err(err). + Str("enabled_route", prefix.String()). + Msg("Error parsing enabled_route") + + continue + } + + err = tx.Preload("Node"). + Where("node_id = ? AND prefix = ?", node.ID, types.IPPrefix(prefix)). + First(&types.Route{}). + Error + if err == nil { + log.Info(). + Str("enabled_route", prefix.String()). + Msg("Route already migrated to new table, skipping") + + continue + } + + route := types.Route{ + NodeID: node.ID, + Advertised: true, + Enabled: true, + Prefix: types.IPPrefix(prefix), + } + if err := tx.Create(&route).Error; err != nil { + log.Error().Err(err).Msg("Error creating route") + } else { + log.Info(). + Uint64("node_id", route.NodeID). + Str("prefix", prefix.String()). 
+ Msg("Route migrated") + } } + } - route := types.Route{ - NodeID: node.ID, - Advertised: true, - Enabled: true, - Prefix: types.IPPrefix(prefix), - } - if err := tx.Create(&route).Error; err != nil { - log.Error().Err(err).Msg("Error creating route") - } else { - log.Info(). - Uint64("node_id", route.NodeID). - Str("prefix", prefix.String()). - Msg("Route migrated") + err = tx.Migrator().DropColumn(&types.Node{}, "enabled_routes") + if err != nil { + log.Error(). + Err(err). + Msg("Error dropping enabled_routes column") + } + } + + if tx.Migrator().HasColumn(&types.Node{}, "given_name") { + nodes := types.Nodes{} + if err := tx.Find(&nodes).Error; err != nil { + log.Error().Err(err).Msg("Error accessing db") + } + + for item, node := range nodes { + if node.GivenName == "" { + normalizedHostname, err := util.NormalizeToFQDNRulesConfigFromViper( + node.Hostname, + ) + if err != nil { + log.Error(). + Caller(). + Str("hostname", node.Hostname). + Err(err). + Msg("Failed to normalize node hostname in DB migration") + } + + err = tx.Model(nodes[item]).Updates(types.Node{ + GivenName: normalizedHostname, + }).Error + if err != nil { + log.Error(). + Caller(). + Str("hostname", node.Hostname). + Err(err). + Msg("Failed to save normalized node name in DB migration") + } } } } - err = tx.Migrator().DropColumn(&types.Node{}, "enabled_routes") + err = tx.AutoMigrate(&KV{}) if err != nil { - log.Error().Err(err).Msg("Error dropping enabled_routes column") - } - } - - if tx.Migrator().HasColumn(&types.Node{}, "given_name") { - nodes := types.Nodes{} - if err := tx.Find(&nodes).Error; err != nil { - log.Error().Err(err).Msg("Error accessing db") + return err } - for item, node := range nodes { - if node.GivenName == "" { - normalizedHostname, err := util.NormalizeToFQDNRulesConfigFromViper( - node.Hostname, - ) - if err != nil { - log.Error(). - Caller(). - Str("hostname", node.Hostname). - Err(err). 
- Msg("Failed to normalize node hostname in DB migration") - } - - err = tx.Model(nodes[item]).Updates(types.Node{ - GivenName: normalizedHostname, - }).Error - if err != nil { - log.Error(). - Caller(). - Str("hostname", node.Hostname). - Err(err). - Msg("Failed to save normalized node name in DB migration") - } - } + err = tx.AutoMigrate(&types.PreAuthKey{}) + if err != nil { + return err } - } - err = tx.AutoMigrate(&KV{}) - if err != nil { - return err - } + err = tx.AutoMigrate(&types.PreAuthKeyACLTag{}) + if err != nil { + return err + } - err = tx.AutoMigrate(&types.PreAuthKey{}) - if err != nil { - return err - } + _ = tx.Migrator().DropTable("shared_machines") - err = tx.AutoMigrate(&types.PreAuthKeyACLTag{}) - if err != nil { - return err - } + err = tx.AutoMigrate(&types.APIKey{}) + if err != nil { + return err + } - _ = tx.Migrator().DropTable("shared_machines") - - err = tx.AutoMigrate(&types.APIKey{}) - if err != nil { - return err - } - - return nil + return nil + }, + Rollback: func(tx *gorm.DB) error { + return nil + }, }, - Rollback: func(tx *gorm.DB) error { - return nil + { + // drop key-value table, it is not used, and has not contained + // useful data for a long time or ever. + ID: "202312101430", + Migrate: func(tx *gorm.DB) error { + return tx.Migrator().DropTable("kvs") + }, + Rollback: func(tx *gorm.DB) error { + return nil + }, }, }, - { - // drop key-value table, it is not used, and has not contained - // useful data for a long time or ever. 
- ID: "202312101430", - Migrate: func(tx *gorm.DB) error { - return tx.Migrator().DropTable("kvs") - }, - Rollback: func(tx *gorm.DB) error { - return nil - }, - }, - }) + ) if err = migrations.Migrate(); err != nil { log.Fatal().Err(err).Msgf("Migration failed: %v", err) @@ -319,7 +334,6 @@ func NewHeadscaleDatabase( } func openDB(cfg types.DatabaseConfig) (*gorm.DB, error) { - // TODO(kradalby): Integrate this with zerolog var dbLogger logger.Interface if cfg.Debug { @@ -374,10 +388,22 @@ func openDB(cfg types.DatabaseConfig) (*gorm.DB, error) { dbString += fmt.Sprintf(" password=%s", cfg.Postgres.Pass) } - return gorm.Open(postgres.Open(dbString), &gorm.Config{ + db, err := gorm.Open(postgres.Open(dbString), &gorm.Config{ DisableForeignKeyConstraintWhenMigrating: true, Logger: dbLogger, }) + if err != nil { + return nil, err + } + + sqlDB, _ := db.DB() + sqlDB.SetMaxIdleConns(cfg.Postgres.MaxIdleConnections) + sqlDB.SetMaxOpenConns(cfg.Postgres.MaxOpenConnections) + sqlDB.SetConnMaxIdleTime( + time.Duration(cfg.Postgres.ConnMaxIdleTimeSecs) * time.Second, + ) + + return db, nil } return nil, fmt.Errorf( diff --git a/hscontrol/types/config.go b/hscontrol/types/config.go index d83b21f7..a82218e6 100644 --- a/hscontrol/types/config.go +++ b/hscontrol/types/config.go @@ -11,7 +11,6 @@ import ( "time" "github.com/coreos/go-oidc/v3/oidc" - "github.com/juanfont/headscale/hscontrol/util" "github.com/prometheus/common/model" "github.com/rs/zerolog" "github.com/rs/zerolog/log" @@ -20,6 +19,8 @@ import ( "tailscale.com/net/tsaddr" "tailscale.com/tailcfg" "tailscale.com/types/dnstype" + + "github.com/juanfont/headscale/hscontrol/util" ) const ( @@ -75,12 +76,15 @@ type SqliteConfig struct { } type PostgresConfig struct { - Host string - Port int - Name string - User string - Pass string - Ssl string + Host string + Port int + Name string + User string + Pass string + Ssl string + MaxOpenConnections int + MaxIdleConnections int + ConnMaxIdleTimeSecs int } type DatabaseConfig 
struct { @@ -213,6 +217,9 @@ func LoadConfig(path string, isFile bool) error { viper.SetDefault("db_ssl", false) viper.SetDefault("database.postgres.ssl", false) + viper.SetDefault("database.postgres.max_open_conns", 10) + viper.SetDefault("database.postgres.max_idle_conns", 10) + viper.SetDefault("database.postgres.conn_max_idle_time_secs", 3600) viper.SetDefault("oidc.scope", []string{oidc.ScopeOpenID, "profile", "email"}) viper.SetDefault("oidc.strip_email_domain", true) @@ -287,7 +294,7 @@ func LoadConfig(path string, isFile bool) error { } if errorText != "" { - //nolint + //nolint return errors.New(strings.TrimSuffix(errorText, "\n")) } else { return nil @@ -429,22 +436,30 @@ func GetDatabaseConfig() DatabaseConfig { case "sqlite": type_ = "sqlite3" default: - log.Fatal().Msgf("invalid database type %q, must be sqlite, sqlite3 or postgres", type_) + log.Fatal(). + Msgf("invalid database type %q, must be sqlite, sqlite3 or postgres", type_) } return DatabaseConfig{ Type: type_, Debug: debug, Sqlite: SqliteConfig{ - Path: util.AbsolutePathFromConfigPath(viper.GetString("database.sqlite.path")), + Path: util.AbsolutePathFromConfigPath( + viper.GetString("database.sqlite.path"), + ), }, Postgres: PostgresConfig{ - Host: viper.GetString("database.postgres.host"), - Port: viper.GetInt("database.postgres.port"), - Name: viper.GetString("database.postgres.name"), - User: viper.GetString("database.postgres.user"), - Pass: viper.GetString("database.postgres.pass"), - Ssl: viper.GetString("database.postgres.ssl"), + Host: viper.GetString("database.postgres.host"), + Port: viper.GetInt("database.postgres.port"), + Name: viper.GetString("database.postgres.name"), + User: viper.GetString("database.postgres.user"), + Pass: viper.GetString("database.postgres.pass"), + Ssl: viper.GetString("database.postgres.ssl"), + MaxOpenConnections: viper.GetInt("database.postgres.max_open_conns"), + MaxIdleConnections: viper.GetInt("database.postgres.max_idle_conns"), + 
ConnMaxIdleTimeSecs: viper.GetInt( + "database.postgres.conn_max_idle_time_secs", + ), }, } }