Update metrics for new code

Kristoffer Dalby 2021-10-05 21:59:15 +00:00
parent 722084fbd3
commit c582c8d206
2 changed files with 18 additions and 16 deletions


@@ -26,16 +26,16 @@ var (
 		Namespace: prometheusNamespace,
 		Name:      "update_request_from_node_total",
 		Help:      "The number of updates requested by a node/update function",
-	}, []string{"state"})
-	updateRequestsToNode = promauto.NewCounter(prometheus.CounterOpts{
+	}, []string{"namespace", "machine", "state"})
+	updateRequestsSentToNode = promauto.NewCounterVec(prometheus.CounterOpts{
 		Namespace: prometheusNamespace,
-		Name:      "update_request_to_node_total",
+		Name:      "update_request_sent_to_node_total",
 		Help:      "The number of calls/messages issued on a specific nodes update channel",
-	})
+	}, []string{"namespace", "machine", "status"})
 	//TODO(kradalby): This is very debugging, we might want to remove it.
 	updateRequestsReceivedOnChannel = promauto.NewCounterVec(prometheus.CounterOpts{
 		Namespace: prometheusNamespace,
 		Name:      "update_request_received_on_channel_total",
 		Help:      "The number of update requests received on an update channel",
-	}, []string{"machine"})
+	}, []string{"namespace", "machine"})
 )
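The change above from promauto.NewCounter to promauto.NewCounterVec means every increment site now has to supply one value per declared label, in declaration order. A minimal, self-contained sketch of that pattern follows; the metric name, namespace and label values here are illustrative, not taken from this commit:

package main

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// Illustrative CounterVec: each distinct (namespace, machine, state) combination
// becomes its own time series on the metrics endpoint.
var exampleUpdateRequests = promauto.NewCounterVec(prometheus.CounterOpts{
	Namespace: "example",
	Name:      "update_request_from_node_total",
	Help:      "The number of updates requested by a node/update function",
}, []string{"namespace", "machine", "state"})

func main() {
	// One value per label, in the order the labels were declared.
	exampleUpdateRequests.WithLabelValues("default", "machine-1", "endpoint-update").Inc()
}

The per-node labels are what make the breakdown by machine possible; the trade-off is higher series cardinality on deployments with many nodes.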

poll.go (24 changes)

@@ -158,7 +158,7 @@ func (h *Headscale) PollNetMapHandler(c *gin.Context) {
 
 		// It sounds like we should update the nodes when we have received a endpoint update
 		// even tho the comments in the tailscale code dont explicitly say so.
-		updateRequestsFromNode.WithLabelValues("endpoint-update").Inc()
+		updateRequestsFromNode.WithLabelValues(m.Name, m.Namespace.Name, "endpoint-update").Inc()
 		go func() { updateChan <- struct{}{} }()
 		return
 	} else if req.OmitPeers && req.Stream {
@@ -184,7 +184,7 @@ func (h *Headscale) PollNetMapHandler(c *gin.Context) {
 		Str("handler", "PollNetMap").
 		Str("machine", m.Name).
 		Msg("Notifying peers")
-	updateRequestsFromNode.WithLabelValues("full-update").Inc()
+	updateRequestsFromNode.WithLabelValues(m.Name, m.Namespace.Name, "full-update").Inc()
 	go func() { updateChan <- struct{}{} }()
 
 	h.PollNetMapStream(c, m, req, mKey, pollDataChan, keepAliveChan, updateChan, cancelKeepAlive)
@@ -324,7 +324,7 @@ func (h *Headscale) PollNetMapStream(
 				Str("machine", m.Name).
 				Str("channel", "update").
 				Msg("Received a request for update")
-			updateRequestsReceivedOnChannel.WithLabelValues(m.Name).Inc()
+			updateRequestsReceivedOnChannel.WithLabelValues(m.Name, m.Namespace.Name).Inc()
 			if h.isOutdated(m) {
 				log.Debug().
 					Str("handler", "PollNetMapStream").
@@ -349,6 +349,7 @@ func (h *Headscale) PollNetMapStream(
 					Str("channel", "update").
 					Err(err).
 					Msg("Could not write the map response")
+				updateRequestsSentToNode.WithLabelValues(m.Name, m.Namespace.Name, "failed").Inc()
 				return false
 			}
 			log.Trace().
@@ -356,14 +357,15 @@
 				Str("machine", m.Name).
 				Str("channel", "update").
 				Msg("Updated Map has been sent")
+			updateRequestsSentToNode.WithLabelValues(m.Name, m.Namespace.Name, "success").Inc()
 
-		// Keep track of the last successful update,
-		// we sometimes end in a state were the update
-		// is not picked up by a client and we use this
-		// to determine if we should "force" an update.
-		// TODO(kradalby): Abstract away all the database calls, this can cause race conditions
-		// when an outdated machine object is kept alive, e.g. db is update from
-		// command line, but then overwritten.
+			// Keep track of the last successful update,
+			// we sometimes end in a state were the update
+			// is not picked up by a client and we use this
+			// to determine if we should "force" an update.
+			// TODO(kradalby): Abstract away all the database calls, this can cause race conditions
+			// when an outdated machine object is kept alive, e.g. db is update from
+			// command line, but then overwritten.
 			err = h.UpdateMachine(m)
 			if err != nil {
 				log.Error().
@@ -481,7 +483,7 @@ func (h *Headscale) scheduledPollWorker(
 				Str("func", "scheduledPollWorker").
 				Str("machine", m.Name).
 				Msg("Sending update request")
-			updateRequestsFromNode.WithLabelValues("scheduled-update").Inc()
+			updateRequestsFromNode.WithLabelValues(m.Name, m.Namespace.Name, "scheduled-update").Inc()
 			updateChan <- struct{}{}
 		}
 	}
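For completeness, a sketch of how counters registered through promauto become visible: promauto registers with the prometheus default registry, so serving promhttp.Handler() on a metrics route is enough to expose them. The address and route below are assumptions for illustration only, not headscale's actual wiring:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Serves every metric registered with the default registry, including
	// counters created via promauto as in the diff above.
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe("127.0.0.1:9090", nil)) // assumed address, for illustration
}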