2023-05-26 10:26:34 +00:00
|
|
|
package mapper
|
|
|
|
|
|
|
|
import (
|
|
|
|
"encoding/binary"
|
|
|
|
"encoding/json"
|
|
|
|
"fmt"
|
2023-07-17 09:13:48 +00:00
|
|
|
"io/fs"
|
2023-05-26 10:26:34 +00:00
|
|
|
"net/url"
|
2023-07-17 09:13:48 +00:00
|
|
|
"os"
|
|
|
|
"path"
|
2023-12-09 17:09:24 +00:00
|
|
|
"slices"
|
2023-06-29 10:20:22 +00:00
|
|
|
"sort"
|
2023-05-26 10:26:34 +00:00
|
|
|
"strings"
|
|
|
|
"sync"
|
2023-07-17 09:13:48 +00:00
|
|
|
"sync/atomic"
|
2023-05-26 10:26:34 +00:00
|
|
|
"time"
|
|
|
|
|
2024-02-23 09:59:24 +00:00
|
|
|
"github.com/juanfont/headscale/hscontrol/db"
|
2024-04-21 16:28:17 +00:00
|
|
|
"github.com/juanfont/headscale/hscontrol/notifier"
|
2023-05-26 10:26:34 +00:00
|
|
|
"github.com/juanfont/headscale/hscontrol/policy"
|
|
|
|
"github.com/juanfont/headscale/hscontrol/types"
|
|
|
|
"github.com/juanfont/headscale/hscontrol/util"
|
|
|
|
"github.com/klauspost/compress/zstd"
|
|
|
|
"github.com/rs/zerolog/log"
|
2023-07-17 09:13:48 +00:00
|
|
|
"tailscale.com/envknob"
|
2023-05-26 10:26:34 +00:00
|
|
|
"tailscale.com/smallzstd"
|
|
|
|
"tailscale.com/tailcfg"
|
|
|
|
"tailscale.com/types/dnstype"
|
|
|
|
)
|
|
|
|
|
|
|
|
const (
	// nextDNSDoHPrefix is the base URL of NextDNS DoH resolvers; resolvers
	// with this prefix get per-device metadata appended (see addNextDNSMetadata).
	nextDNSDoHPrefix = "https://dns.nextdns.io"

	// reservedResponseHeaderSize is the number of bytes reserved at the front
	// of a marshalled map response for the little-endian payload-length header.
	reservedResponseHeaderSize = 4

	// mapperIDLength is the length of the random DNS-safe ID assigned to
	// each Mapper instance, used to correlate debug output.
	mapperIDLength = 8

	// debugMapResponsePerm is the directory/file mode used when dumping
	// map responses to disk for debugging.
	debugMapResponsePerm = 0o755
)
|
|
|
|
|
2023-07-17 09:13:48 +00:00
|
|
|
// debugDumpMapResponsePath enables dumping of every generated MapResponse
// to disk when the HEADSCALE_DEBUG_DUMP_MAPRESPONSE_PATH environment
// variable is set to a directory path.
var debugDumpMapResponsePath = envknob.String("HEADSCALE_DEBUG_DUMP_MAPRESPONSE_PATH")
|
|
|
|
|
2023-08-09 20:56:21 +00:00
|
|
|
// TODO: Optimise
// As this work continues, the idea is that there will be one Mapper instance
// per node, attached to the open stream between the control and client.
// This means that this can hold a state per node and we can use that to
// improve the mapresponses sent.
// We could:
// - Keep information about the previous mapresponse so we can send a diff
// - Store hashes
// - Create a "minifier" that removes info not needed for the node
// - some sort of batching, wait for 5 or 60 seconds before sending

// Mapper produces tailcfg.MapResponse payloads for a node's long-poll
// stream, combining database state, policy, DERP and DNS configuration.
type Mapper struct {
	// Configuration
	// TODO(kradalby): figure out if this is the format we want this in
	db      *db.HSDatabase       // source of node/peer records
	cfg     *types.Config        // headscale configuration (DNS, base domain, logtail, ...)
	derpMap *tailcfg.DERPMap     // current DERP map handed to clients
	notif   *notifier.Notifier   // used to annotate peers with likely-online state
	polMan  policy.PolicyManager // ACL filter and SSH policy source

	uid     string    // random DNS-safe identifier for this mapper instance (debug correlation)
	created time.Time // creation time, reported by String()
	seq     uint64    // number of responses produced; incremented atomically in marshalMapResponse
}
|
|
|
|
|
|
|
|
// patch couples a partial peer update with the time it was observed,
// allowing later patches to take precedence over earlier ones.
type patch struct {
	timestamp time.Time           // when the change was recorded
	change    *tailcfg.PeerChange // the partial peer update itself
}
|
|
|
|
|
|
|
|
func NewMapper(
|
2024-02-23 09:59:24 +00:00
|
|
|
db *db.HSDatabase,
|
|
|
|
cfg *types.Config,
|
2023-05-26 10:26:34 +00:00
|
|
|
derpMap *tailcfg.DERPMap,
|
2024-04-21 16:28:17 +00:00
|
|
|
notif *notifier.Notifier,
|
2024-10-26 16:42:14 +00:00
|
|
|
polMan policy.PolicyManager,
|
2023-05-26 10:26:34 +00:00
|
|
|
) *Mapper {
|
2023-07-26 09:53:42 +00:00
|
|
|
uid, _ := util.GenerateRandomStringDNSSafe(mapperIDLength)
|
2023-07-24 06:58:51 +00:00
|
|
|
|
2023-05-26 10:26:34 +00:00
|
|
|
return &Mapper{
|
2024-04-21 16:28:17 +00:00
|
|
|
db: db,
|
|
|
|
cfg: cfg,
|
|
|
|
derpMap: derpMap,
|
|
|
|
notif: notif,
|
2024-10-26 16:42:14 +00:00
|
|
|
polMan: polMan,
|
2023-07-24 06:58:51 +00:00
|
|
|
|
|
|
|
uid: uid,
|
|
|
|
created: time.Now(),
|
|
|
|
seq: 0,
|
2023-05-26 10:26:34 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-07-24 06:58:51 +00:00
|
|
|
func (m *Mapper) String() string {
|
|
|
|
return fmt.Sprintf("Mapper: { seq: %d, uid: %s, created: %s }", m.seq, m.uid, m.created)
|
|
|
|
}
|
|
|
|
|
2023-05-26 10:26:34 +00:00
|
|
|
func generateUserProfiles(
|
2023-09-24 11:42:05 +00:00
|
|
|
node *types.Node,
|
|
|
|
peers types.Nodes,
|
2023-05-26 10:26:34 +00:00
|
|
|
) []tailcfg.UserProfile {
|
Redo OIDC configuration (#2020)
expand user, add claims to user
This commit expands the user table with additional fields that
can be retrieved from OIDC providers (and other places) and
uses this data in various tailscale response objects if it is
available.
This is the beginning of implementing
https://docs.google.com/document/d/1X85PMxIaVWDF6T_UPji3OeeUqVBcGj_uHRM5CI-AwlY/edit
trying to make OIDC more coherant and maintainable in addition
to giving the user a better experience and integration with a
provider.
remove usernames in magic dns, normalisation of emails
this commit removes the option to have usernames as part of MagicDNS
domains and headscale will now align with Tailscale, where there is a
root domain, and the machine name.
In addition, the various normalisation functions for dns names has been
made lighter not caring about username and special character that wont
occur.
Email are no longer normalised as part of the policy processing.
untagle oidc and regcache, use typed cache
This commits stops reusing the registration cache for oidc
purposes and switches the cache to be types and not use any
allowing the removal of a bunch of casting.
try to make reauth/register branches clearer in oidc
Currently there was a function that did a bunch of stuff,
finding the machine key, trying to find the node, reauthing
the node, returning some status, and it was called validate
which was very confusing.
This commit tries to split this into what to do if the node
exists, if it needs to register etc.
Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
2024-10-02 12:50:17 +00:00
|
|
|
userMap := make(map[uint]types.User)
|
|
|
|
userMap[node.User.ID] = node.User
|
2023-05-26 10:26:34 +00:00
|
|
|
for _, peer := range peers {
|
Redo OIDC configuration (#2020)
expand user, add claims to user
This commit expands the user table with additional fields that
can be retrieved from OIDC providers (and other places) and
uses this data in various tailscale response objects if it is
available.
This is the beginning of implementing
https://docs.google.com/document/d/1X85PMxIaVWDF6T_UPji3OeeUqVBcGj_uHRM5CI-AwlY/edit
trying to make OIDC more coherant and maintainable in addition
to giving the user a better experience and integration with a
provider.
remove usernames in magic dns, normalisation of emails
this commit removes the option to have usernames as part of MagicDNS
domains and headscale will now align with Tailscale, where there is a
root domain, and the machine name.
In addition, the various normalisation functions for dns names has been
made lighter not caring about username and special character that wont
occur.
Email are no longer normalised as part of the policy processing.
untagle oidc and regcache, use typed cache
This commits stops reusing the registration cache for oidc
purposes and switches the cache to be types and not use any
allowing the removal of a bunch of casting.
try to make reauth/register branches clearer in oidc
Currently there was a function that did a bunch of stuff,
finding the machine key, trying to find the node, reauthing
the node, returning some status, and it was called validate
which was very confusing.
This commit tries to split this into what to do if the node
exists, if it needs to register etc.
Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
2024-10-02 12:50:17 +00:00
|
|
|
userMap[peer.User.ID] = peer.User // not worth checking if already is there
|
2023-05-26 10:26:34 +00:00
|
|
|
}
|
|
|
|
|
2024-06-23 20:06:50 +00:00
|
|
|
var profiles []tailcfg.UserProfile
|
2023-05-26 10:26:34 +00:00
|
|
|
for _, user := range userMap {
|
|
|
|
profiles = append(profiles,
|
2024-07-19 07:03:18 +00:00
|
|
|
user.TailscaleUserProfile())
|
2023-05-26 10:26:34 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return profiles
|
|
|
|
}
|
|
|
|
|
|
|
|
func generateDNSConfig(
|
2024-06-26 11:44:40 +00:00
|
|
|
cfg *types.Config,
|
2023-09-24 11:42:05 +00:00
|
|
|
node *types.Node,
|
2023-05-26 10:26:34 +00:00
|
|
|
) *tailcfg.DNSConfig {
|
2024-06-26 11:44:40 +00:00
|
|
|
if cfg.DNSConfig == nil {
|
|
|
|
return nil
|
|
|
|
}
|
2023-05-26 10:26:34 +00:00
|
|
|
|
2024-06-26 11:44:40 +00:00
|
|
|
dnsConfig := cfg.DNSConfig.Clone()
|
2023-05-26 10:26:34 +00:00
|
|
|
|
2023-09-24 11:42:05 +00:00
|
|
|
addNextDNSMetadata(dnsConfig.Resolvers, node)
|
2023-05-26 10:26:34 +00:00
|
|
|
|
|
|
|
return dnsConfig
|
|
|
|
}
|
|
|
|
|
|
|
|
// If any nextdns DoH resolvers are present in the list of resolvers it will
|
2023-09-24 11:42:05 +00:00
|
|
|
// take metadata from the node metadata and instruct tailscale to add it
|
2023-05-26 10:26:34 +00:00
|
|
|
// to the requests. This makes it possible to identify from which device the
|
|
|
|
// requests come in the NextDNS dashboard.
|
|
|
|
//
|
|
|
|
// This will produce a resolver like:
|
|
|
|
// `https://dns.nextdns.io/<nextdns-id>?device_name=node-name&device_model=linux&device_ip=100.64.0.1`
|
2023-09-24 11:42:05 +00:00
|
|
|
func addNextDNSMetadata(resolvers []*dnstype.Resolver, node *types.Node) {
|
2023-05-26 10:26:34 +00:00
|
|
|
for _, resolver := range resolvers {
|
|
|
|
if strings.HasPrefix(resolver.Addr, nextDNSDoHPrefix) {
|
|
|
|
attrs := url.Values{
|
2023-09-24 11:42:05 +00:00
|
|
|
"device_name": []string{node.Hostname},
|
2023-11-21 17:20:06 +00:00
|
|
|
"device_model": []string{node.Hostinfo.OS},
|
2023-05-26 10:26:34 +00:00
|
|
|
}
|
|
|
|
|
2024-04-17 05:03:06 +00:00
|
|
|
if len(node.IPs()) > 0 {
|
|
|
|
attrs.Add("device_ip", node.IPs()[0].String())
|
2023-05-26 10:26:34 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
resolver.Addr = fmt.Sprintf("%s?%s", resolver.Addr, attrs.Encode())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-08-09 20:56:21 +00:00
|
|
|
// fullMapResponse creates a complete MapResponse for a node.
|
|
|
|
// It is a separate function to make testing easier.
|
|
|
|
func (m *Mapper) fullMapResponse(
|
2023-09-24 11:42:05 +00:00
|
|
|
node *types.Node,
|
2024-02-23 09:59:24 +00:00
|
|
|
peers types.Nodes,
|
2023-11-23 07:31:33 +00:00
|
|
|
capVer tailcfg.CapabilityVersion,
|
2023-08-09 20:56:21 +00:00
|
|
|
) (*tailcfg.MapResponse, error) {
|
2024-10-26 16:42:14 +00:00
|
|
|
resp, err := m.baseWithConfigMapResponse(node, capVer)
|
2023-08-09 20:56:21 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
err = appendPeerChanges(
|
|
|
|
resp,
|
2024-02-23 09:59:24 +00:00
|
|
|
true, // full change
|
2024-10-26 16:42:14 +00:00
|
|
|
m.polMan,
|
2023-09-24 11:42:05 +00:00
|
|
|
node,
|
2023-11-23 07:31:33 +00:00
|
|
|
capVer,
|
2023-08-09 20:56:21 +00:00
|
|
|
peers,
|
2024-02-23 09:59:24 +00:00
|
|
|
m.cfg,
|
2023-08-09 20:56:21 +00:00
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return resp, nil
|
|
|
|
}
|
|
|
|
|
2023-09-24 11:42:05 +00:00
|
|
|
// FullMapResponse returns a MapResponse for the given node.
|
2023-07-24 06:58:51 +00:00
|
|
|
func (m *Mapper) FullMapResponse(
|
2023-05-26 10:26:34 +00:00
|
|
|
mapRequest tailcfg.MapRequest,
|
2023-09-24 11:42:05 +00:00
|
|
|
node *types.Node,
|
2024-02-23 09:59:24 +00:00
|
|
|
messages ...string,
|
2023-05-26 10:26:34 +00:00
|
|
|
) ([]byte, error) {
|
2024-02-23 09:59:24 +00:00
|
|
|
peers, err := m.ListPeers(node.ID)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
2023-12-09 17:09:24 +00:00
|
|
|
}
|
|
|
|
|
2024-10-26 17:27:12 +00:00
|
|
|
resp, err := m.fullMapResponse(node, peers, mapRequest.Version)
|
2023-05-26 10:26:34 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2024-02-23 09:59:24 +00:00
|
|
|
return m.marshalMapResponse(mapRequest, resp, node, mapRequest.Compress, messages...)
|
2023-06-29 10:20:22 +00:00
|
|
|
}
|
2023-05-26 10:26:34 +00:00
|
|
|
|
2024-09-09 12:17:25 +00:00
|
|
|
// ReadOnlyMapResponse returns a MapResponse for the given node.
|
2023-07-26 12:42:12 +00:00
|
|
|
// Lite means that the peers has been omitted, this is intended
|
2023-07-26 11:55:03 +00:00
|
|
|
// to be used to answer MapRequests with OmitPeers set to true.
|
2024-02-23 09:59:24 +00:00
|
|
|
func (m *Mapper) ReadOnlyMapResponse(
|
2023-07-26 11:55:03 +00:00
|
|
|
mapRequest tailcfg.MapRequest,
|
2023-09-24 11:42:05 +00:00
|
|
|
node *types.Node,
|
2024-02-08 16:28:19 +00:00
|
|
|
messages ...string,
|
2023-07-26 11:55:03 +00:00
|
|
|
) ([]byte, error) {
|
2024-10-26 16:42:14 +00:00
|
|
|
resp, err := m.baseWithConfigMapResponse(node, mapRequest.Version)
|
2023-07-26 11:55:03 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2024-02-08 16:28:19 +00:00
|
|
|
return m.marshalMapResponse(mapRequest, resp, node, mapRequest.Compress, messages...)
|
2023-07-26 11:55:03 +00:00
|
|
|
}
|
|
|
|
|
2023-07-24 06:58:51 +00:00
|
|
|
func (m *Mapper) KeepAliveResponse(
|
2023-06-29 10:20:22 +00:00
|
|
|
mapRequest tailcfg.MapRequest,
|
2023-09-24 11:42:05 +00:00
|
|
|
node *types.Node,
|
2023-06-29 10:20:22 +00:00
|
|
|
) ([]byte, error) {
|
2023-08-09 20:56:21 +00:00
|
|
|
resp := m.baseMapResponse()
|
2023-06-29 10:20:22 +00:00
|
|
|
resp.KeepAlive = true
|
2023-05-26 10:26:34 +00:00
|
|
|
|
2023-09-24 11:42:05 +00:00
|
|
|
return m.marshalMapResponse(mapRequest, &resp, node, mapRequest.Compress)
|
2023-05-26 10:26:34 +00:00
|
|
|
}
|
|
|
|
|
2023-07-24 06:58:51 +00:00
|
|
|
func (m *Mapper) DERPMapResponse(
|
2023-05-26 10:26:34 +00:00
|
|
|
mapRequest tailcfg.MapRequest,
|
2023-09-24 11:42:05 +00:00
|
|
|
node *types.Node,
|
2023-12-09 17:09:24 +00:00
|
|
|
derpMap *tailcfg.DERPMap,
|
2023-05-26 10:26:34 +00:00
|
|
|
) ([]byte, error) {
|
2023-12-09 17:09:24 +00:00
|
|
|
m.derpMap = derpMap
|
|
|
|
|
2023-08-09 20:56:21 +00:00
|
|
|
resp := m.baseMapResponse()
|
2023-12-09 17:09:24 +00:00
|
|
|
resp.DERPMap = derpMap
|
2023-05-26 10:26:34 +00:00
|
|
|
|
2023-09-24 11:42:05 +00:00
|
|
|
return m.marshalMapResponse(mapRequest, &resp, node, mapRequest.Compress)
|
2023-06-29 10:20:22 +00:00
|
|
|
}
|
|
|
|
|
2023-07-24 06:58:51 +00:00
|
|
|
func (m *Mapper) PeerChangedResponse(
|
2023-06-29 10:20:22 +00:00
|
|
|
mapRequest tailcfg.MapRequest,
|
2023-09-24 11:42:05 +00:00
|
|
|
node *types.Node,
|
2024-02-23 09:59:24 +00:00
|
|
|
changed map[types.NodeID]bool,
|
|
|
|
patches []*tailcfg.PeerChange,
|
2023-12-09 17:09:24 +00:00
|
|
|
messages ...string,
|
2023-06-29 10:20:22 +00:00
|
|
|
) ([]byte, error) {
|
2024-02-23 09:59:24 +00:00
|
|
|
resp := m.baseMapResponse()
|
2023-06-29 10:20:22 +00:00
|
|
|
|
2024-02-23 09:59:24 +00:00
|
|
|
peers, err := m.ListPeers(node.ID)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2023-12-09 17:09:24 +00:00
|
|
|
|
2024-02-23 09:59:24 +00:00
|
|
|
var removedIDs []tailcfg.NodeID
|
|
|
|
var changedIDs []types.NodeID
|
|
|
|
for nodeID, nodeChanged := range changed {
|
|
|
|
if nodeChanged {
|
|
|
|
changedIDs = append(changedIDs, nodeID)
|
|
|
|
} else {
|
|
|
|
removedIDs = append(removedIDs, nodeID.NodeID())
|
2023-12-09 17:09:24 +00:00
|
|
|
}
|
2023-05-26 10:26:34 +00:00
|
|
|
}
|
|
|
|
|
2024-02-23 09:59:24 +00:00
|
|
|
changedNodes := make(types.Nodes, 0, len(changedIDs))
|
|
|
|
for _, peer := range peers {
|
|
|
|
if slices.Contains(changedIDs, peer.ID) {
|
|
|
|
changedNodes = append(changedNodes, peer)
|
|
|
|
}
|
|
|
|
}
|
2023-08-09 20:56:21 +00:00
|
|
|
|
2024-02-23 09:59:24 +00:00
|
|
|
err = appendPeerChanges(
|
2023-08-09 20:56:21 +00:00
|
|
|
&resp,
|
2024-02-23 09:59:24 +00:00
|
|
|
false, // partial change
|
2024-10-26 16:42:14 +00:00
|
|
|
m.polMan,
|
2023-09-24 11:42:05 +00:00
|
|
|
node,
|
2023-11-23 07:31:33 +00:00
|
|
|
mapRequest.Version,
|
2024-02-23 09:59:24 +00:00
|
|
|
changedNodes,
|
|
|
|
m.cfg,
|
2023-06-29 10:20:22 +00:00
|
|
|
)
|
2023-05-26 10:26:34 +00:00
|
|
|
if err != nil {
|
2023-06-29 10:20:22 +00:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2024-02-23 09:59:24 +00:00
|
|
|
resp.PeersRemoved = removedIDs
|
|
|
|
|
|
|
|
// Sending patches as a part of a PeersChanged response
|
|
|
|
// is technically not suppose to be done, but they are
|
|
|
|
// applied after the PeersChanged. The patch list
|
|
|
|
// should _only_ contain Nodes that are not in the
|
|
|
|
// PeersChanged or PeersRemoved list and the caller
|
|
|
|
// should filter them out.
|
|
|
|
//
|
|
|
|
// From tailcfg docs:
|
|
|
|
// These are applied after Peers* above, but in practice the
|
|
|
|
// control server should only send these on their own, without
|
|
|
|
// the Peers* fields also set.
|
|
|
|
if patches != nil {
|
|
|
|
resp.PeersChangedPatch = patches
|
|
|
|
}
|
|
|
|
|
|
|
|
// Add the node itself, it might have changed, and particularly
|
|
|
|
// if there are no patches or changes, this is a self update.
|
2024-10-26 16:42:14 +00:00
|
|
|
tailnode, err := tailNode(node, mapRequest.Version, m.polMan, m.cfg)
|
2024-02-23 09:59:24 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
resp.Node = tailnode
|
|
|
|
|
2023-12-09 17:09:24 +00:00
|
|
|
return m.marshalMapResponse(mapRequest, &resp, node, mapRequest.Compress, messages...)
|
|
|
|
}
|
|
|
|
|
|
|
|
// PeerChangedPatchResponse creates a patch MapResponse with
|
|
|
|
// incoming update from a state change.
|
|
|
|
func (m *Mapper) PeerChangedPatchResponse(
|
|
|
|
mapRequest tailcfg.MapRequest,
|
|
|
|
node *types.Node,
|
|
|
|
changed []*tailcfg.PeerChange,
|
|
|
|
) ([]byte, error) {
|
|
|
|
resp := m.baseMapResponse()
|
|
|
|
resp.PeersChangedPatch = changed
|
2023-06-29 10:20:22 +00:00
|
|
|
|
2023-09-24 11:42:05 +00:00
|
|
|
return m.marshalMapResponse(mapRequest, &resp, node, mapRequest.Compress)
|
2023-05-26 10:26:34 +00:00
|
|
|
}
|
|
|
|
|
2023-07-24 06:58:51 +00:00
|
|
|
func (m *Mapper) marshalMapResponse(
|
2023-07-26 12:42:12 +00:00
|
|
|
mapRequest tailcfg.MapRequest,
|
2023-06-29 10:20:22 +00:00
|
|
|
resp *tailcfg.MapResponse,
|
2023-09-24 11:42:05 +00:00
|
|
|
node *types.Node,
|
2023-06-29 10:20:22 +00:00
|
|
|
compression string,
|
2023-12-09 17:09:24 +00:00
|
|
|
messages ...string,
|
2023-06-29 10:20:22 +00:00
|
|
|
) ([]byte, error) {
|
2023-07-24 06:58:51 +00:00
|
|
|
atomic.AddUint64(&m.seq, 1)
|
|
|
|
|
2023-05-26 10:26:34 +00:00
|
|
|
jsonBody, err := json.Marshal(resp)
|
|
|
|
if err != nil {
|
2024-04-12 13:57:43 +00:00
|
|
|
return nil, fmt.Errorf("marshalling map response: %w", err)
|
2023-05-26 10:26:34 +00:00
|
|
|
}
|
|
|
|
|
2023-07-17 09:13:48 +00:00
|
|
|
if debugDumpMapResponsePath != "" {
|
|
|
|
data := map[string]interface{}{
|
2023-12-09 17:09:24 +00:00
|
|
|
"Messages": messages,
|
2023-07-17 09:13:48 +00:00
|
|
|
"MapRequest": mapRequest,
|
|
|
|
"MapResponse": resp,
|
|
|
|
}
|
|
|
|
|
2023-12-09 17:09:24 +00:00
|
|
|
responseType := "keepalive"
|
|
|
|
|
|
|
|
switch {
|
|
|
|
case resp.Peers != nil && len(resp.Peers) > 0:
|
|
|
|
responseType = "full"
|
2024-02-23 09:59:24 +00:00
|
|
|
case resp.Peers == nil && resp.PeersChanged == nil && resp.PeersChangedPatch == nil && resp.DERPMap == nil && !resp.KeepAlive:
|
2024-02-08 16:28:19 +00:00
|
|
|
responseType = "self"
|
2023-12-09 17:09:24 +00:00
|
|
|
case resp.PeersChanged != nil && len(resp.PeersChanged) > 0:
|
|
|
|
responseType = "changed"
|
|
|
|
case resp.PeersChangedPatch != nil && len(resp.PeersChangedPatch) > 0:
|
|
|
|
responseType = "patch"
|
|
|
|
case resp.PeersRemoved != nil && len(resp.PeersRemoved) > 0:
|
|
|
|
responseType = "removed"
|
|
|
|
}
|
|
|
|
|
|
|
|
body, err := json.MarshalIndent(data, "", " ")
|
2023-07-17 09:13:48 +00:00
|
|
|
if err != nil {
|
2024-04-12 13:57:43 +00:00
|
|
|
return nil, fmt.Errorf("marshalling map response: %w", err)
|
2023-07-17 09:13:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
perms := fs.FileMode(debugMapResponsePerm)
|
2023-09-24 11:42:05 +00:00
|
|
|
mPath := path.Join(debugDumpMapResponsePath, node.Hostname)
|
2023-07-17 09:13:48 +00:00
|
|
|
err = os.MkdirAll(mPath, perms)
|
|
|
|
if err != nil {
|
|
|
|
panic(err)
|
|
|
|
}
|
|
|
|
|
2024-02-23 09:59:24 +00:00
|
|
|
now := time.Now().Format("2006-01-02T15-04-05.999999999")
|
2023-07-17 09:13:48 +00:00
|
|
|
|
|
|
|
mapResponsePath := path.Join(
|
|
|
|
mPath,
|
2024-02-23 09:59:24 +00:00
|
|
|
fmt.Sprintf("%s-%s-%d-%s.json", now, m.uid, atomic.LoadUint64(&m.seq), responseType),
|
2023-07-17 09:13:48 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
log.Trace().Msgf("Writing MapResponse to %s", mapResponsePath)
|
2023-07-26 12:42:12 +00:00
|
|
|
err = os.WriteFile(mapResponsePath, body, perms)
|
2023-07-17 09:13:48 +00:00
|
|
|
if err != nil {
|
|
|
|
panic(err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-05-26 10:26:34 +00:00
|
|
|
var respBody []byte
|
|
|
|
if compression == util.ZstdCompression {
|
|
|
|
respBody = zstdEncode(jsonBody)
|
|
|
|
} else {
|
2023-11-23 07:31:33 +00:00
|
|
|
respBody = jsonBody
|
2023-05-26 10:26:34 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
data := make([]byte, reservedResponseHeaderSize)
|
|
|
|
binary.LittleEndian.PutUint32(data, uint32(len(respBody)))
|
|
|
|
data = append(data, respBody...)
|
|
|
|
|
|
|
|
return data, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func zstdEncode(in []byte) []byte {
|
|
|
|
encoder, ok := zstdEncoderPool.Get().(*zstd.Encoder)
|
|
|
|
if !ok {
|
|
|
|
panic("invalid type in sync pool")
|
|
|
|
}
|
|
|
|
out := encoder.EncodeAll(in, nil)
|
|
|
|
_ = encoder.Close()
|
|
|
|
zstdEncoderPool.Put(encoder)
|
|
|
|
|
|
|
|
return out
|
|
|
|
}
|
|
|
|
|
|
|
|
// zstdEncoderPool reuses zstd encoders across map responses so a new
// encoder does not have to be allocated for every compressed response.
var zstdEncoderPool = &sync.Pool{
	New: func() any {
		encoder, err := smallzstd.NewEncoder(
			nil,
			zstd.WithEncoderLevel(zstd.SpeedFastest))
		if err != nil {
			// Encoder construction with static options should never fail;
			// treat it as a programmer error.
			panic(err)
		}

		return encoder
	},
}
|
2023-06-29 10:20:22 +00:00
|
|
|
|
2023-08-09 20:56:21 +00:00
|
|
|
// baseMapResponse returns a tailcfg.MapResponse with
|
|
|
|
// KeepAlive false and ControlTime set to now.
|
|
|
|
func (m *Mapper) baseMapResponse() tailcfg.MapResponse {
|
2023-06-29 10:20:22 +00:00
|
|
|
now := time.Now()
|
|
|
|
|
|
|
|
resp := tailcfg.MapResponse{
|
|
|
|
KeepAlive: false,
|
|
|
|
ControlTime: &now,
|
2023-12-09 17:09:24 +00:00
|
|
|
// TODO(kradalby): Implement PingRequest?
|
2023-06-29 10:20:22 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return resp
|
|
|
|
}
|
2023-08-09 20:20:05 +00:00
|
|
|
|
2023-08-09 20:56:21 +00:00
|
|
|
// baseWithConfigMapResponse returns a tailcfg.MapResponse struct
|
|
|
|
// with the basic configuration from headscale set.
|
|
|
|
// It is used in for bigger updates, such as full and lite, not
|
|
|
|
// incremental.
|
|
|
|
func (m *Mapper) baseWithConfigMapResponse(
|
2023-09-24 11:42:05 +00:00
|
|
|
node *types.Node,
|
2023-11-23 07:31:33 +00:00
|
|
|
capVer tailcfg.CapabilityVersion,
|
2023-08-09 20:56:21 +00:00
|
|
|
) (*tailcfg.MapResponse, error) {
|
|
|
|
resp := m.baseMapResponse()
|
|
|
|
|
2024-10-26 16:42:14 +00:00
|
|
|
tailnode, err := tailNode(node, capVer, m.polMan, m.cfg)
|
2023-08-09 20:56:21 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
resp.Node = tailnode
|
|
|
|
|
|
|
|
resp.DERPMap = m.derpMap
|
|
|
|
|
2024-02-23 09:59:24 +00:00
|
|
|
resp.Domain = m.cfg.BaseDomain
|
2023-08-09 20:56:21 +00:00
|
|
|
|
|
|
|
// Do not instruct clients to collect services we do not
|
|
|
|
// support or do anything with them
|
|
|
|
resp.CollectServices = "false"
|
|
|
|
|
|
|
|
resp.KeepAlive = false
|
|
|
|
|
|
|
|
resp.Debug = &tailcfg.Debug{
|
2024-02-23 09:59:24 +00:00
|
|
|
DisableLogTail: !m.cfg.LogTail.Enabled,
|
2023-08-09 20:56:21 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return &resp, nil
|
|
|
|
}
|
|
|
|
|
2024-02-23 09:59:24 +00:00
|
|
|
func (m *Mapper) ListPeers(nodeID types.NodeID) (types.Nodes, error) {
|
|
|
|
peers, err := m.db.ListPeers(nodeID)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, peer := range peers {
|
2024-04-21 16:28:17 +00:00
|
|
|
online := m.notif.IsLikelyConnected(peer.ID)
|
2024-02-23 09:59:24 +00:00
|
|
|
peer.IsOnline = &online
|
|
|
|
}
|
|
|
|
|
|
|
|
return peers, nil
|
|
|
|
}
|
|
|
|
|
2023-09-24 11:42:05 +00:00
|
|
|
func nodeMapToList(nodes map[uint64]*types.Node) types.Nodes {
|
|
|
|
ret := make(types.Nodes, 0)
|
2023-08-09 20:20:05 +00:00
|
|
|
|
2023-09-24 11:42:05 +00:00
|
|
|
for _, node := range nodes {
|
|
|
|
ret = append(ret, node)
|
2023-08-09 20:20:05 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return ret
|
|
|
|
}
|
|
|
|
|
2023-08-09 20:56:21 +00:00
|
|
|
// appendPeerChanges mutates a tailcfg.MapResponse with all the
// necessary changes when peers have changed.
//
// When fullChange is true the peers go into resp.Peers (complete list);
// otherwise into resp.PeersChanged (incremental). In both cases the
// response also receives the DNS configuration, user profiles, SSH policy
// and packet filter derived from the current policy.
func appendPeerChanges(
	resp *tailcfg.MapResponse,

	fullChange bool,
	polMan policy.PolicyManager,
	node *types.Node,
	capVer tailcfg.CapabilityVersion,
	changed types.Nodes,
	cfg *types.Config,
) error {
	filter := polMan.Filter()

	sshPolicy, err := polMan.SSHPolicy(node)
	if err != nil {
		return err
	}

	// If there are filter rules present, see if there are any nodes that cannot
	// access each-other at all and remove them from the peers.
	if len(filter) > 0 {
		changed = policy.FilterNodesByACL(node, changed, filter)
	}

	// Profiles and DNS are derived from the (possibly ACL-reduced) peer set.
	profiles := generateUserProfiles(node, changed)

	dnsConfig := generateDNSConfig(cfg, node)

	tailPeers, err := tailNodes(changed, capVer, polMan, cfg)
	if err != nil {
		return err
	}

	// Peers is always returned sorted by Node.ID.
	sort.SliceStable(tailPeers, func(x, y int) bool {
		return tailPeers[x].ID < tailPeers[y].ID
	})

	if fullChange {
		resp.Peers = tailPeers
	} else {
		resp.PeersChanged = tailPeers
	}
	resp.DNSConfig = dnsConfig
	resp.UserProfiles = profiles
	resp.SSHPolicy = sshPolicy

	// 81: 2023-11-17: MapResponse.PacketFilters (incremental packet filter updates)
	if capVer >= 81 {
		// Currently, we do not send incremental package filters, however using the
		// new PacketFilters field and "base" allows us to send a full update when we
		// have to send an empty list, avoiding the hack in the else block.
		resp.PacketFilters = map[string][]tailcfg.FilterRule{
			"base": policy.ReduceFilterRules(node, filter),
		}
	} else {
		// This is a hack to avoid sending an empty list of packet filters.
		// Since tailcfg.PacketFilter has omitempty, any empty PacketFilter will
		// be omitted, causing the client to consider it unchanged, keeping the
		// previous packet filter. Worst case, this can cause a node that previously
		// has access to a node to _not_ loose access if an empty (allow none) is sent.
		reduced := policy.ReduceFilterRules(node, filter)
		if len(reduced) > 0 {
			resp.PacketFilter = reduced
		} else {
			resp.PacketFilter = filter
		}
	}

	return nil
}
|